From 6ab50f8c3cd381af277afa8cc191a0e013e19da2 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Thu, 1 Aug 2013 20:34:59 +0000 Subject: [PATCH 01/51] create branch for HDFS-4949 git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1509426 13f79535-47bb-0310-9956-ffa450edef68 From 2a4031940c6ffe16f38164316305ff531142aff3 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Fri, 9 Aug 2013 18:14:07 +0000 Subject: [PATCH 02/51] HDFS-5049. Add JNI mlock support. (Andrew Wang via Colin Patrick McCabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1512427 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/hadoop/io/nativeio/NativeIO.java | 85 +++++++++++++++++++ .../org/apache/hadoop/io/nativeio/NativeIO.c | 73 ++++++++++++++++ .../hadoop/io/nativeio/TestNativeIO.java | 58 +++++++++++++ .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 14 +++ .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 + .../hadoop/hdfs/server/datanode/DNConf.java | 10 +++ .../hadoop/hdfs/server/datanode/DataNode.java | 21 +++++ .../hadoop/hdfs/TestDatanodeConfig.java | 25 ++++++ 8 files changed, 288 insertions(+) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java index 1412d610431..96193eed035 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java @@ -23,6 +23,7 @@ import java.io.FileOutputStream; import java.io.IOException; import java.io.RandomAccessFile; +import java.nio.ByteBuffer; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -145,6 +146,12 @@ public static boolean isAvailable() { return NativeCodeLoader.isNativeCodeLoaded() && nativeLoaded; } + private static void assertCodeLoaded() throws IOException { + if (!isAvailable()) { + throw new IOException("NativeIO was not loaded"); + } + } + /** Wrapper around open(2) */ public static native FileDescriptor open(String path, int flags, int mode) throws IOException; /** Wrapper around fstat(2) */ @@ -225,6 +232,84 @@ public static void syncFileRangeIfPossible( } } + static native void mlock_native( + ByteBuffer buffer, long len) throws NativeIOException; + static native void munlock_native( + ByteBuffer buffer, long len) throws NativeIOException; + + /** + * Locks the provided direct ByteBuffer into memory, preventing it from + * swapping out. After a buffer is locked, future accesses will not incur + * a page fault. + * + * See the mlock(2) man page for more information. + * + * @throws NativeIOException + */ + public static void mlock(ByteBuffer buffer, long len) + throws IOException { + assertCodeLoaded(); + if (!buffer.isDirect()) { + throw new IOException("Cannot mlock a non-direct ByteBuffer"); + } + mlock_native(buffer, len); + } + + /** + * Unlocks a locked direct ByteBuffer, allowing it to swap out of memory. + * This is a no-op if the ByteBuffer was not previously locked. + * + * See the munlock(2) man page for more information. 
+ * + * @throws NativeIOException + */ + public static void munlock(ByteBuffer buffer, long len) + throws IOException { + assertCodeLoaded(); + if (!buffer.isDirect()) { + throw new IOException("Cannot munlock a non-direct ByteBuffer"); + } + munlock_native(buffer, len); + } + + /** + * Resource limit types copied from + */ + private static class ResourceLimit { + public static final int RLIMIT_CPU = 0; + public static final int RLIMIT_FSIZE = 1; + public static final int RLIMIT_DATA = 2; + public static final int RLIMIT_STACK = 3; + public static final int RLIMIT_CORE = 4; + public static final int RLIMIT_RSS = 5; + public static final int RLIMIT_NPROC = 6; + public static final int RLIMIT_NOFILE = 7; + public static final int RLIMIT_MEMLOCK = 8; + public static final int RLIMIT_AS = 9; + public static final int RLIMIT_LOCKS = 10; + public static final int RLIMIT_SIGPENDING = 11; + public static final int RLIMIT_MSGQUEUE = 12; + public static final int RLIMIT_NICE = 13; + public static final int RLIMIT_RTPRIO = 14; + public static final int RLIMIT_RTTIME = 15; + public static final int RLIMIT_NLIMITS = 16; + } + + static native String getrlimit(int limit) throws NativeIOException; + /** + * Returns the soft limit on the number of bytes that may be locked by the + * process in bytes (RLIMIT_MEMLOCK). + * + * See the getrlimit(2) man page for more information + * + * @return maximum amount of locked memory in bytes + */ + public static long getMemlockLimit() throws IOException { + assertCodeLoaded(); + String strLimit = getrlimit(ResourceLimit.RLIMIT_MEMLOCK); + return Long.parseLong(strLimit); + } + /** Linux only methods used for getOwner() implementation */ private static native long getUIDforFDOwnerforOwner(FileDescriptor fd) throws IOException; private static native String getUserName(long uid) throws IOException; diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c index cb21a7bee66..afa4720e507 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c @@ -31,8 +31,11 @@ #include #include #include +#include +#include #include #include +#include #include #include #include "config.h" @@ -360,6 +363,76 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_sync_1file_1range( #endif } +/** + * public static native void mlock_native( + * ByteBuffer buffer, long offset); + * + * The "00024" in the function name is an artifact of how JNI encodes + * special characters. U+0024 is '$'. + */ +JNIEXPORT void JNICALL +Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_mlock_1native( + JNIEnv *env, jclass clazz, + jobject buffer, jlong len) +{ + void* buf = (void*)(*env)->GetDirectBufferAddress(env, buffer); + PASS_EXCEPTIONS(env); + + if (mlock(buf, len)) { + throw_ioe(env, errno); + } +} + +/** + * public static native void munlock_native( + * ByteBuffer buffer, long offset); + * + * The "00024" in the function name is an artifact of how JNI encodes + * special characters. U+0024 is '$'. 
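
The Java wrappers above and the JNI glue below are meant to be used together on a direct buffer such as a MappedByteBuffer. A minimal usage sketch (the class name, file path, and buffer handling are illustrative and not part of this patch; it assumes the native library is loaded and the region fits under RLIMIT_MEMLOCK):

    import java.io.FileInputStream;
    import java.nio.MappedByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.channels.FileChannel.MapMode;

    import org.apache.hadoop.io.nativeio.NativeIO;

    public class MlockExample {
      public static void main(String[] args) throws Exception {
        FileInputStream fis = new FileInputStream("/tmp/example.dat"); // placeholder path
        try {
          FileChannel channel = fis.getChannel();
          long len = channel.size();
          // Only direct buffers can be locked; a MappedByteBuffer qualifies.
          MappedByteBuffer buf = channel.map(MapMode.READ_ONLY, 0, len);
          if (NativeIO.isAvailable() && len <= NativeIO.POSIX.getMemlockLimit()) {
            NativeIO.POSIX.mlock(buf, len);     // later reads will not page-fault
            try {
              // ... read from buf while it is pinned in RAM ...
            } finally {
              NativeIO.POSIX.munlock(buf, len); // allow the pages to swap again
            }
          }
        } finally {
          fis.close();
        }
      }
    }

This is essentially the flow exercised by TestNativeIO#testMlock further down in this patch.
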
+ */ +JNIEXPORT void JNICALL +Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_munlock_1native( + JNIEnv *env, jclass clazz, + jobject buffer, jlong len) +{ + void* buf = (void*)(*env)->GetDirectBufferAddress(env, buffer); + PASS_EXCEPTIONS(env); + + if (munlock(buf, len)) { + throw_ioe(env, errno); + } +} + +/** + * public static native String getrlimit( + * int resource); + * + * The "00024" in the function name is an artifact of how JNI encodes + * special characters. U+0024 is '$'. + */ +JNIEXPORT jstring JNICALL +Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_getrlimit( + JNIEnv *env, jclass clazz, + jint resource) +{ + jstring ret = NULL; + + struct rlimit rlim; + int rc = getrlimit((int)resource, &rlim); + if (rc != 0) { + throw_ioe(env, errno); + goto cleanup; + } + + // Convert soft limit into a string + char limit[17]; + int len = snprintf(&limit, 17, "%d", rlim.rlim_cur); + ret = (*env)->NewStringUTF(env,&limit); + +cleanup: + return ret; +} + #ifdef __FreeBSD__ static int toFreeBSDFlags(int flags) { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java index 4d71e15c4b3..69c963f2d75 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java @@ -24,6 +24,9 @@ import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; +import java.nio.MappedByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.channels.FileChannel.MapMode; import java.util.concurrent.atomic.AtomicReference; import java.util.ArrayList; import java.util.Arrays; @@ -32,6 +35,7 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; + import static org.junit.Assume.*; import static org.junit.Assert.*; @@ -45,6 +49,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.NativeCodeLoader; +import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Time; public class TestNativeIO { @@ -524,4 +529,57 @@ public void testRenameTo() throws Exception { FileUtils.deleteQuietly(TEST_DIR); } + + @Test(timeout=10000) + public void testMlock() throws Exception { + assumeTrue(NativeIO.isAvailable()); + assumeTrue(Shell.LINUX); + final File TEST_FILE = new File(new File( + System.getProperty("test.build.data","build/test/data")), + "testMlockFile"); + final int BUF_LEN = 12289; + byte buf[] = new byte[BUF_LEN]; + int bufSum = 0; + for (int i = 0; i < buf.length; i++) { + buf[i] = (byte)(i % 60); + bufSum += buf[i]; + } + FileOutputStream fos = new FileOutputStream(TEST_FILE); + fos.write(buf); + fos.getChannel().force(true); + fos.close(); + + FileInputStream fis = null; + FileChannel channel = null; + try { + // Map file into memory + fis = new FileInputStream(TEST_FILE); + channel = fis.getChannel(); + long fileSize = channel.size(); + MappedByteBuffer mapbuf = channel.map(MapMode.READ_ONLY, 0, fileSize); + // mlock the buffer + NativeIO.POSIX.mlock(mapbuf, fileSize); + // Read the buffer + int sum = 0; + for (int i=0; i 0) { + if (!NativeIO.isAvailable()) { + throw new RuntimeException(String.format( + "Cannot start datanode because the configured max locked memory" + + " size (%s) is greater than zero and native code is not available.", + 
DFS_DATANODE_MAX_LOCKED_MEMORY_KEY)); + } + long ulimit = NativeIO.POSIX.getMemlockLimit(); + if (dnConf.maxLockedMemory > ulimit) { + throw new RuntimeException(String.format( + "Cannot start datanode because the configured max locked memory" + + " size (%s) of %d bytes is less than the datanode's available" + + " RLIMIT_MEMLOCK ulimit of %d bytes.", + DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, + dnConf.maxLockedMemory, + ulimit)); + } + } + storage = new DataStorage(); // global DN settings diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java index 62565170bb0..f2166b74115 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeTrue; import java.io.File; import java.io.IOException; @@ -30,6 +31,8 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.io.nativeio.NativeIO; +import org.apache.hadoop.test.GenericTestUtils; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -106,4 +109,26 @@ private static String makeURI(String scheme, String host, String path) throw new IOException("Bad URI", e); } } + + @Test(timeout=60000) + public void testMemlockLimit() throws Exception { + assumeTrue(NativeIO.isAvailable()); + final long memlockLimit = NativeIO.POSIX.getMemlockLimit(); + Configuration conf = cluster.getConfiguration(0); + // Try starting the DN with limit configured to the ulimit + conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, + memlockLimit); + DataNode dn = null; + dn = DataNode.createDataNode(new String[]{}, conf); + dn.shutdown(); + // Try starting the DN with a limit > ulimit + conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, + memlockLimit+1); + try { + dn = DataNode.createDataNode(new String[]{}, conf); + } catch (RuntimeException e) { + GenericTestUtils.assertExceptionContains( + "less than the datanode's available RLIMIT_MEMLOCK", e); + } + } } From 52ccc6c6d539d0587c3fd9693709bd1f6e12619d Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Tue, 13 Aug 2013 21:05:09 +0000 Subject: [PATCH 03/51] HDFS-5051. 
Propagate cache status information from the DataNode to the NameNode (Andrew Wang via Colin Patrick McCabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1513653 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 ++ .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 ++ ...atanodeProtocolClientSideTranslatorPB.java | 26 ++++++++++++++ ...atanodeProtocolServerSideTranslatorPB.java | 24 +++++++++++++ .../server/blockmanagement/BlockManager.java | 9 +++++ .../hdfs/server/datanode/BPOfferService.java | 6 ++++ .../hdfs/server/datanode/BPServiceActor.java | 36 ++++++++++++++++--- .../hadoop/hdfs/server/datanode/DNConf.java | 3 ++ .../hadoop/hdfs/server/datanode/DataNode.java | 1 + .../server/namenode/NameNodeRpcServer.java | 12 +++++++ .../server/protocol/DatanodeProtocol.java | 21 ++++++++++- .../src/main/proto/DatanodeProtocol.proto | 24 ++++++++++++- 12 files changed, 160 insertions(+), 7 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index 375f0bfb7c6..d12d273abea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -9,6 +9,9 @@ HDFS-4949 (Unreleased) IMPROVEMENTS HDFS-5049. Add JNI mlock support. (Andrew Wang via Colin Patrick McCabe) + HDFS-5051. Propagate cache status information from the DataNode to the + NameNode (Andrew Wang via Colin Patrick McCabe) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index daf38827f0e..f392df8d9ed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -354,6 +354,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final long DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT = 60 * 60 * 1000; public static final String DFS_BLOCKREPORT_INITIAL_DELAY_KEY = "dfs.blockreport.initialDelay"; public static final int DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT = 0; + public static final String DFS_CACHEREPORT_INTERVAL_MSEC_KEY = "dfs.cachereport.intervalMsec"; + public static final long DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT = 10 * 1000; public static final String DFS_BLOCK_INVALIDATE_LIMIT_KEY = "dfs.block.invalidate.limit"; public static final int DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT = 1000; public static final String DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY = "dfs.corruptfilesreturned.max"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java index fd4cc4b01c5..cf3921cf12e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java @@ -21,6 +21,7 @@ import java.io.Closeable; import java.io.IOException; import java.net.InetSocketAddress; +import java.util.ArrayList; import java.util.HashMap; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -36,6 +37,8 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto; @@ -202,6 +205,29 @@ public DatanodeCommand blockReport(DatanodeRegistration registration, return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null; } + @Override + public DatanodeCommand cacheReport(DatanodeRegistration registration, + String poolId, long[] blocks) throws IOException { + CacheReportRequestProto.Builder builder = + CacheReportRequestProto.newBuilder() + .setRegistration(PBHelper.convert(registration)) + .setBlockPoolId(poolId); + for (int i=0; i 0) { + // Uniform random jitter by the delay + lastCacheReport = Time.monotonicNow() + - dnConf.cacheReportInterval + + DFSUtil.getRandom().nextInt(((int)delay)); + } else { // send at next heartbeat + lastCacheReport = lastCacheReport - dnConf.cacheReportInterval; + } + } + void reportBadBlocks(ExtendedBlock block) { if (bpRegistration == null) { return; @@ -430,6 +443,15 @@ DatanodeCommand blockReport() throws IOException { return cmd; } + DatanodeCommand cacheReport() throws IOException { + // send cache report if timer has expired. + DatanodeCommand cmd = null; + long startTime = Time.monotonicNow(); + if (startTime - lastCacheReport > dnConf.cacheReportInterval) { + // TODO: Implement me! + } + return cmd; + } HeartbeatResponse sendHeartBeat() throws IOException { if (LOG.isDebugEnabled()) { @@ -496,11 +518,12 @@ private synchronized void cleanUp() { * forever calling remote NameNode functions. */ private void offerService() throws Exception { - LOG.info("For namenode " + nnAddr + " using DELETEREPORT_INTERVAL of " - + dnConf.deleteReportInterval + " msec " + " BLOCKREPORT_INTERVAL of " - + dnConf.blockReportInterval + "msec" + " Initial delay: " - + dnConf.initialBlockReportDelay + "msec" + "; heartBeatInterval=" - + dnConf.heartBeatInterval); + LOG.info("For namenode " + nnAddr + " using" + + " DELETEREPORT_INTERVAL of " + dnConf.deleteReportInterval + " msec " + + " BLOCKREPORT_INTERVAL of " + dnConf.blockReportInterval + "msec" + + " CACHEREPORT_INTERVAL of " + dnConf.cacheReportInterval + "msec" + + " Initial delay: " + dnConf.initialBlockReportDelay + "msec" + + "; heartBeatInterval=" + dnConf.heartBeatInterval); // // Now loop for a long time.... @@ -555,6 +578,9 @@ private void offerService() throws Exception { DatanodeCommand cmd = blockReport(); processCommand(new DatanodeCommand[]{ cmd }); + cmd = cacheReport(); + processCommand(new DatanodeCommand[]{ cmd }); + // Now safe to start scanning the block pool. // If it has already been started, this is a no-op. 
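
The scheduleCacheReport()/cacheReport() pair above works by back-dating lastCacheReport so that the elapsed-time check trips at the desired moment. A condensed, self-contained sketch of that arithmetic (the wrapper class is illustrative; the field names and the 10-second default mirror the patch):

    import java.util.Random;

    class CacheReportTimer {
      private final long cacheReportInterval = 10000L; // dfs.cachereport.intervalMsec default
      private final Random random = new Random();
      private long lastCacheReport;

      void scheduleCacheReport(long delay, long now) {
        if (delay > 0) {
          // Back-date by a full interval, then add uniform jitter in [0, delay):
          // the first report becomes due at (now + jitter), which spreads the
          // reports from many DataNodes across the delay window.
          lastCacheReport = now - cacheReportInterval + random.nextInt((int) delay);
        } else {
          // Make the report overdue so it goes out on the next heartbeat loop.
          lastCacheReport = lastCacheReport - cacheReportInterval;
        }
      }

      boolean due(long now) {
        return now - lastCacheReport > cacheReportInterval;
      }
    }

The same pattern of an initial random delay followed by a fixed interval is already used for block reports.
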
if (dn.blockScanner != null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java index c022ca368a7..8b21577b6a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java @@ -70,6 +70,7 @@ public class DNConf { final long blockReportInterval; final long deleteReportInterval; final long initialBlockReportDelay; + final long cacheReportInterval; final int writePacketSize; final String minimumNameNodeVersion; @@ -114,6 +115,8 @@ public DNConf(Configuration conf) { DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT); this.blockReportInterval = conf.getLong(DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT); + this.cacheReportInterval = conf.getLong(DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, + DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT); long initBRDelay = conf.getLong( DFS_BLOCKREPORT_INITIAL_DELAY_KEY, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 7ff26c8134f..65a1c922b94 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -1916,6 +1916,7 @@ static StartupOption getStartupOption(Configuration conf) { public void scheduleAllBlockReport(long delay) { for(BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) { bpos.scheduleBlockReport(delay); + bpos.scheduleCacheReport(delay); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 3f26582e721..e0800147344 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -951,6 +951,18 @@ public DatanodeCommand blockReport(DatanodeRegistration nodeReg, return null; } + @Override + public DatanodeCommand cacheReport(DatanodeRegistration nodeReg, + String poolId, long[] blocks) throws IOException { + verifyRequest(nodeReg); + BlockListAsLongs blist = new BlockListAsLongs(blocks); + namesystem.getBlockManager().processCacheReport(nodeReg, poolId, blist); + if (nn.getFSImage().isUpgradeFinalized() && !nn.isStandbyState()) { + return new FinalizeCommand(poolId); + } + return null; + } + @Override // DatanodeProtocol public void blockReceivedAndDeleted(DatanodeRegistration nodeReg, String poolId, StorageReceivedDeletedBlocks[] receivedAndDeletedBlocks) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java index 27a10998d44..0bdda59d642 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java @@ -22,10 +22,10 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.io.retry.AtMostOnce; import org.apache.hadoop.io.retry.Idempotent; import org.apache.hadoop.security.KerberosInfo; @@ -128,6 +128,25 @@ public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration, public DatanodeCommand blockReport(DatanodeRegistration registration, String poolId, StorageBlockReport[] reports) throws IOException; + + /** + * Communicates the complete list of locally cached blocks to the NameNode. + * + * This method is similar to + * {@link #blockReport(DatanodeRegistration, String, StorageBlockReport[])}, + * which is used to communicated blocks stored on disk. + * + * @param registration + * @param poolId block pool ID for the blocks + * @param blocks a Long[] array from {@link BlockListAsLongs} that describes + * the list of cached blocks. This is more memory-efficient than a Block[]. + * @return + * @throws IOException + */ + @Idempotent + public DatanodeCommand cacheReport(DatanodeRegistration registration, + String poolId, long[] blocks) throws IOException; + /** * blockReceivedAndDeleted() allows the DataNode to tell the NameNode about * recently-received and -deleted block data. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto index 3b9b90b5d27..4c0c5a43a50 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto @@ -205,9 +205,11 @@ message HeartbeatResponseProto { /** * registration - datanode registration information * blockPoolID - block pool ID of the reported blocks - * blocks - each block is represented as two longs in the array. + * blocks - each block is represented as multiple longs in the array. * first long represents block ID * second long represents length + * third long represents gen stamp + * fourth long (if under construction) represents replica state */ message BlockReportRequestProto { required DatanodeRegistrationProto registration = 1; @@ -230,6 +232,21 @@ message BlockReportResponseProto { optional DatanodeCommandProto cmd = 1; } +/** + * registration - datanode registration information + * blockPoolId - block pool ID of the reported blocks + * blocks - representation of blocks as longs for efficiency reasons + */ +message CacheReportRequestProto { + required DatanodeRegistrationProto registration = 1; + required string blockPoolId = 2; + repeated uint64 blocks = 3 [packed=true]; +} + +message CacheReportResponseProto { + optional DatanodeCommandProto cmd = 1; +} + /** * Data structure to send received or deleted block information * from datanode to namenode. @@ -347,6 +364,11 @@ service DatanodeProtocolService { */ rpc blockReport(BlockReportRequestProto) returns(BlockReportResponseProto); + /** + * Report cached blocks at a datanode to the namenode + */ + rpc cacheReport(CacheReportRequestProto) returns(CacheReportResponseProto); + /** * Incremental block report from the DN. 
This contains info about recently * received and deleted blocks, as well as when blocks start being From 67f86baab0c8a95d6ae5d1f431fa06764e46c79d Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Tue, 20 Aug 2013 18:07:47 +0000 Subject: [PATCH 04/51] HDFS-4953. Enable HDFS local reads via mmap. Contributed by Colin Patrick McCabe. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1515906 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/hadoop/fs/FSDataInputStream.java | 17 +- .../org/apache/hadoop/fs/FSInputStream.java | 11 +- .../apache/hadoop/fs/SupportsZeroCopy.java | 44 ++ .../org/apache/hadoop/fs/ZeroCopyCursor.java | 111 ++++ .../fs/ZeroCopyUnavailableException.java | 36 ++ .../dev-support/findbugsExcludeFile.xml | 10 + .../hadoop-hdfs/src/CMakeLists.txt | 11 + .../org/apache/hadoop/hdfs/BlockReader.java | 19 + .../apache/hadoop/hdfs/BlockReaderLocal.java | 56 +++ .../hadoop/hdfs/BlockReaderLocalLegacy.java | 9 + .../org/apache/hadoop/hdfs/DFSClient.java | 51 +- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 6 + .../apache/hadoop/hdfs/DFSInputStream.java | 82 +++ .../hadoop/hdfs/HdfsZeroCopyCursor.java | 148 ++++++ .../apache/hadoop/hdfs/RemoteBlockReader.java | 9 + .../hadoop/hdfs/RemoteBlockReader2.java | 10 +- .../apache/hadoop/hdfs/client/ClientMmap.java | 166 ++++++ .../hadoop/hdfs/client/ClientMmapManager.java | 476 ++++++++++++++++++ .../src/main/native/libhdfs/expect.c | 52 ++ .../src/main/native/libhdfs/expect.h | 63 ++- .../src/main/native/libhdfs/hdfs.c | 305 +++++++++-- .../src/main/native/libhdfs/hdfs.h | 85 +++- .../src/main/native/libhdfs/hdfs_test.h | 9 + .../src/main/native/libhdfs/jni_helper.c | 39 ++ .../src/main/native/libhdfs/jni_helper.h | 26 + .../src/main/native/libhdfs/native_mini_dfs.c | 97 ++++ .../src/main/native/libhdfs/native_mini_dfs.h | 22 +- .../libhdfs/test/test_libhdfs_zerocopy.c | 225 +++++++++ .../src/main/resources/hdfs-default.xml | 28 ++ .../hadoop/hdfs/TestBlockReaderLocal.java | 348 ++++++++++++- 30 files changed, 2508 insertions(+), 63 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/SupportsZeroCopy.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ZeroCopyCursor.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ZeroCopyUnavailableException.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsZeroCopyCursor.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ClientMmap.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ClientMmapManager.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java index 5c032c3a6a5..25a971447f2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java @@ -28,9 +28,9 @@ @InterfaceAudience.Public @InterfaceStability.Stable public class FSDataInputStream extends DataInputStream - implements Seekable, PositionedReadable, 
Closeable, - ByteBufferReadable, HasFileDescriptor, CanSetDropBehind, CanSetReadahead { - + implements Seekable, PositionedReadable, Closeable, + ByteBufferReadable, HasFileDescriptor, CanSetDropBehind, CanSetReadahead, + SupportsZeroCopy { public FSDataInputStream(InputStream in) throws IOException { super(in); @@ -167,4 +167,15 @@ public void setDropBehind(Boolean dropBehind) "support setting the drop-behind caching setting."); } } + + @Override + public ZeroCopyCursor createZeroCopyCursor() + throws IOException, ZeroCopyUnavailableException { + try { + return ((SupportsZeroCopy)in).createZeroCopyCursor(); + } + catch (ClassCastException e) { + throw new ZeroCopyUnavailableException(e); + } + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java index 8d668feeaba..e3308814ce2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java @@ -18,9 +18,11 @@ package org.apache.hadoop.fs; import java.io.*; +import java.nio.ByteBuffer; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.ZeroCopyUnavailableException; /**************************************************************** * FSInputStream is a generic old InputStream with a little bit @@ -30,7 +32,7 @@ @InterfaceAudience.LimitedPrivate({"HDFS"}) @InterfaceStability.Unstable public abstract class FSInputStream extends InputStream - implements Seekable, PositionedReadable { + implements Seekable, PositionedReadable, SupportsZeroCopy { /** * Seek to the given offset from the start of the file. * The next read() will be from that location. Can't @@ -86,4 +88,11 @@ public void readFully(long position, byte[] buffer) throws IOException { readFully(position, buffer, 0, buffer.length); } + + @Override + public ZeroCopyCursor createZeroCopyCursor() + throws IOException, ZeroCopyUnavailableException { + throw new ZeroCopyUnavailableException("zero copy is not implemented " + + "for this filesystem type."); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/SupportsZeroCopy.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/SupportsZeroCopy.java new file mode 100644 index 00000000000..2a4d51da07a --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/SupportsZeroCopy.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Supports zero-copy reads. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public interface SupportsZeroCopy { + /** + * Get a zero-copy cursor to use for zero-copy reads. + * + * @throws IOException + * If there was an error creating the ZeroCopyCursor + * @throws UnsupportedOperationException + * If this stream does not support zero-copy reads. + * This is used, for example, when one stream wraps another + * which may or may not support ZCR. + */ + public ZeroCopyCursor createZeroCopyCursor() + throws IOException, ZeroCopyUnavailableException; +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ZeroCopyCursor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ZeroCopyCursor.java new file mode 100644 index 00000000000..5181b49da2d --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ZeroCopyCursor.java @@ -0,0 +1,111 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.io.Closeable; +import java.io.EOFException; +import java.io.IOException; +import java.nio.ByteBuffer; + +/** + * A ZeroCopyCursor allows you to make zero-copy reads. + * + * Cursors should be closed when they are no longer needed. + * + * Example: + * FSDataInputStream fis = fs.open("/file"); + * ZeroCopyCursor cursor = fis.createZeroCopyCursor(); + * try { + * cursor.read(128); + * ByteBuffer data = cursor.getData(); + * processData(data); + * } finally { + * cursor.close(); + * } + */ +public interface ZeroCopyCursor extends Closeable { + /** + * Set the fallback buffer used for this zero copy cursor. + * The fallback buffer is used when a true zero-copy read is impossible. + * If there is no fallback buffer, UnsupportedOperationException is thrown + * when a true zero-copy read cannot be done. + * + * @param fallbackBuffer The fallback buffer to set, or null for none. + */ + public void setFallbackBuffer(ByteBuffer fallbackBuffer); + + /** + * @return the fallback buffer in use, or null if there is none. + */ + public ByteBuffer getFallbackBuffer(); + + /** + * @param skipChecksums Whether we should skip checksumming with this + * zero copy cursor. + */ + public void setSkipChecksums(boolean skipChecksums); + + /** + * @return Whether we should skip checksumming with this + * zero copy cursor. + */ + public boolean getSkipChecksums(); + + /** + * @param allowShortReads Whether we should allow short reads. 
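
Putting these knobs together, a caller might drive the cursor as follows (a hedged sketch: the class name, path, and buffer sizes are placeholders, and whether a given read is truly zero-copy depends on the underlying stream):

    import java.io.IOException;
    import java.nio.ByteBuffer;

    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.ZeroCopyCursor;
    import org.apache.hadoop.fs.ZeroCopyUnavailableException;

    public class ZeroCopyReadExample {
      /** Reads 128 KB starting at the stream's current position. */
      public static void readOnce(FileSystem fs) throws IOException {
        FSDataInputStream fis = fs.open(new Path("/user/example/file")); // placeholder
        try {
          ZeroCopyCursor cursor = fis.createZeroCopyCursor();
          try {
            // A fallback buffer lets the read succeed even when a true
            // zero-copy (mmap) read is impossible.
            cursor.setFallbackBuffer(ByteBuffer.allocateDirect(1024 * 1024));
            cursor.setSkipChecksums(false);
            cursor.setAllowShortReads(false); // full 128 KB or an EOFException
            cursor.read(128 * 1024);
            ByteBuffer data = cursor.getData(); // valid until the next read() or close()
            // ... consume data without copying it ...
          } finally {
            cursor.close();
          }
        } catch (ZeroCopyUnavailableException e) {
          // The wrapped stream does not implement SupportsZeroCopy; fall back
          // to ordinary fis.read(...) calls.
        } finally {
          fis.close();
        }
      }
    }
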
+ */ + public void setAllowShortReads(boolean allowShortReads); + + /** + * @return Whether we should allow short reads. + */ + public boolean getAllowShortReads(); + + /** + * Perform a zero-copy read. + * + * @param toRead The minimum number of bytes to read. + * Must not be negative. If we hit EOF before + * reading this many bytes, we will either throw + * EOFException (if allowShortReads = false), or + * return a short read (if allowShortReads = true). + * A short read could be as short as 0 bytes. + * @throws UnsupportedOperationException + * If a true zero-copy read cannot be done, and no fallback + * buffer was set. + * @throws EOFException + * If allowShortReads = false, and we can't read all the bytes + * that were requested. This will never be thrown if + * allowShortReads = true. + * @throws IOException + * If there was an error while reading the data. + */ + public void read(int toRead) + throws UnsupportedOperationException, EOFException, IOException; + + /** + * Get the current data buffer. + * + * This buffer will remain valid until either this cursor is closed, or we + * call read() again on this same cursor. You can find the amount of data + * that was read previously by calling ByteBuffer#remaining. + * + * @return The current data buffer. + */ + public ByteBuffer getData(); +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ZeroCopyUnavailableException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ZeroCopyUnavailableException.java new file mode 100644 index 00000000000..9cb68277e56 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ZeroCopyUnavailableException.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs; + +import java.io.IOException; + +public class ZeroCopyUnavailableException extends IOException { + private static final long serialVersionUID = 0L; + + public ZeroCopyUnavailableException(String message) { + super(message); + } + + public ZeroCopyUnavailableException(String message, Exception e) { + super(message, e); + } + + public ZeroCopyUnavailableException(Exception e) { + super(e); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml index acfbea0c8a5..fb05e3ab1aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml @@ -330,4 +330,14 @@ + + + + + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt index 56528927987..be9c53edf77 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt @@ -142,6 +142,7 @@ target_link_libraries(test_native_mini_dfs ) add_executable(test_libhdfs_threaded + main/native/libhdfs/expect.c main/native/libhdfs/test_libhdfs_threaded.c ) target_link_libraries(test_libhdfs_threaded @@ -150,6 +151,16 @@ target_link_libraries(test_libhdfs_threaded pthread ) +add_executable(test_libhdfs_zerocopy + main/native/libhdfs/expect.c + main/native/libhdfs/test/test_libhdfs_zerocopy.c +) +target_link_libraries(test_libhdfs_zerocopy + hdfs + native_mini_dfs + pthread +) + IF(REQUIRE_LIBWEBHDFS) add_subdirectory(contrib/libwebhdfs) ENDIF(REQUIRE_LIBWEBHDFS) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java index e1e40c0191f..456a79f7d89 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java @@ -20,6 +20,8 @@ import java.io.IOException; import org.apache.hadoop.fs.ByteBufferReadable; +import org.apache.hadoop.hdfs.client.ClientMmapManager; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; /** * A BlockReader is responsible for reading a single block @@ -81,4 +83,21 @@ public interface BlockReader extends ByteBufferReadable { * All short-circuit reads are also local. */ boolean isShortCircuit(); + + /** + * Do a zero-copy read with the current block reader. + * + * We assume that the calling code has done bounds checking, and won't ask + * us for more bytes than are supposed to be visible (or are in the file). + * + * @param buffers The zero-copy buffers object. + * @param curBlock The current block. + * @param blockPos Position in the current block to start reading at. + * @param toRead The number of bytes to read from the block. + * + * @return true if the read was done, false otherwise. 
+ */ + boolean readZeroCopy(HdfsZeroCopyCursor buffers, + LocatedBlock curBlock, long blockPos, int toRead, + ClientMmapManager mmapManager); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java index c1cb0b3db3f..3e430a150c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java @@ -22,11 +22,15 @@ import java.io.FileInputStream; import java.io.IOException; import java.nio.ByteBuffer; +import org.apache.hadoop.conf.Configuration; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hdfs.client.ClientMmap; +import org.apache.hadoop.hdfs.client.ClientMmapManager; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader; import org.apache.hadoop.hdfs.util.DirectBufferPool; import org.apache.hadoop.io.IOUtils; @@ -87,6 +91,8 @@ class BlockReaderLocal implements BlockReader { private final ExtendedBlock block; private final FileInputStreamCache fisCache; + private ClientMmap clientMmap; + private boolean mmapDisabled; private static int getSlowReadBufferNumChunks(int bufSize, int bytesPerChecksum) { @@ -113,6 +119,8 @@ public BlockReaderLocal(DFSClient.Conf conf, String filename, this.datanodeID = datanodeID; this.block = block; this.fisCache = fisCache; + this.clientMmap = null; + this.mmapDisabled = false; // read and handle the common header here. For now just a version checksumIn.getChannel().position(0); @@ -487,6 +495,10 @@ public synchronized long skip(long n) throws IOException { @Override public synchronized void close() throws IOException { + if (clientMmap != null) { + clientMmap.unref(); + clientMmap = null; + } if (fisCache != null) { if (LOG.isDebugEnabled()) { LOG.debug("putting FileInputStream for " + filename + @@ -534,4 +546,48 @@ public boolean isLocal() { public boolean isShortCircuit() { return true; } + + @Override + public boolean readZeroCopy(HdfsZeroCopyCursor cursor, + LocatedBlock curBlock, long blockPos, int toRead, + ClientMmapManager mmapManager) { + if (clientMmap == null) { + if (mmapDisabled) { + return false; + } + try { + clientMmap = mmapManager.fetch(datanodeID, block, dataIn); + if (clientMmap == null) { + mmapDisabled = true; + return false; + } + } catch (InterruptedException e) { + LOG.error("Interrupted while setting up mmap for " + filename, e); + Thread.currentThread().interrupt(); + return false; + } catch (IOException e) { + LOG.error("unable to set up mmap for " + filename, e); + mmapDisabled = true; + return false; + } + } + long limit = blockPos + toRead; + if (limit > Integer.MAX_VALUE) { + /* + * In Java, ByteBuffers use a 32-bit length, capacity, offset, etc. + * This limits our mmap'ed regions to 2 GB in length. + * TODO: we can implement zero-copy for larger blocks by doing multiple + * mmaps. 
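
Because ByteBuffer addresses its contents with int positions and limits, the guard just above caps zero-copy reads at ranges ending below 2 GB into the block (hence the TODO about multiple mmaps). Within that range, the lines that follow hand the cursor a private slice of the shared mapping; restated with the reasoning spelled out:

    // duplicate() shares the mapped bytes but gives this reader its own
    // position/limit, so concurrent readers of the same ClientMmap do not
    // disturb each other's view of the mapping.
    ByteBuffer mmapBuffer = clientMmap.getMappedByteBuffer().duplicate();
    mmapBuffer.position((int) blockPos);          // casts are safe: the end of the
    mmapBuffer.limit((int) (blockPos + toRead));  // range was checked against Integer.MAX_VALUE
    cursor.setMmap(clientMmap, mmapBuffer);
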
+ */ + mmapDisabled = true; + clientMmap.unref(); + clientMmap = null; + return false; + } + ByteBuffer mmapBuffer = clientMmap.getMappedByteBuffer().duplicate(); + mmapBuffer.position((int)blockPos); + mmapBuffer.limit((int)limit); + cursor.setMmap(clientMmap, mmapBuffer); + return true; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java index aeb6279bead..a4a4f680bfa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java @@ -28,6 +28,7 @@ import java.util.LinkedHashMap; import java.util.Map; +import org.apache.hadoop.hdfs.client.ClientMmapManager; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -35,6 +36,7 @@ import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader; import org.apache.hadoop.hdfs.util.DirectBufferPool; @@ -701,4 +703,11 @@ public boolean isLocal() { public boolean isShortCircuit() { return true; } + + @Override + public boolean readZeroCopy(HdfsZeroCopyCursor buffers, + LocatedBlock curBlock, long blockPos, int toRead, + ClientMmapManager mmapManager) { + return false; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 0aded40b073..102386931d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -103,6 +103,7 @@ import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.VolumeId; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.client.ClientMmapManager; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.protocol.ClientProtocol; @@ -204,7 +205,43 @@ public class DFSClient implements java.io.Closeable { private boolean shouldUseLegacyBlockReaderLocal; private final CachingStrategy defaultReadCachingStrategy; private final CachingStrategy defaultWriteCachingStrategy; + private ClientMmapManager mmapManager; + private static final ClientMmapManagerFactory MMAP_MANAGER_FACTORY = + new ClientMmapManagerFactory(); + + private static final class ClientMmapManagerFactory { + private ClientMmapManager mmapManager = null; + /** + * Tracks the number of users of mmapManager. + */ + private int refcnt = 0; + + synchronized ClientMmapManager get(Configuration conf) { + if (refcnt++ == 0) { + mmapManager = ClientMmapManager.fromConf(conf); + } else { + String mismatches = mmapManager.verifyConfigurationMatches(conf); + if (!mismatches.isEmpty()) { + LOG.warn("The ClientMmapManager settings you specified " + + "have been ignored because another thread created the " + + "ClientMmapManager first. 
" + mismatches); + } + } + return mmapManager; + } + + synchronized void unref(ClientMmapManager mmapManager) { + if (this.mmapManager != mmapManager) { + throw new IllegalArgumentException(); + } + if (--refcnt == 0) { + IOUtils.cleanup(LOG, mmapManager); + mmapManager = null; + } + } + } + /** * DFSClient configuration */ @@ -513,6 +550,7 @@ public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode, new CachingStrategy(readDropBehind, readahead); this.defaultWriteCachingStrategy = new CachingStrategy(writeDropBehind, readahead); + this.mmapManager = MMAP_MANAGER_FACTORY.get(conf); } /** @@ -716,9 +754,12 @@ void closeConnectionToNamenode() { /** Abort and release resources held. Ignore all errors. */ void abort() { + if (mmapManager != null) { + MMAP_MANAGER_FACTORY.unref(mmapManager); + mmapManager = null; + } clientRunning = false; closeAllFilesBeingWritten(true); - try { // remove reference to this client and stop the renewer, // if there is no more clients under the renewer. @@ -762,6 +803,10 @@ private void closeAllFilesBeingWritten(final boolean abort) { */ @Override public synchronized void close() throws IOException { + if (mmapManager != null) { + MMAP_MANAGER_FACTORY.unref(mmapManager); + mmapManager = null; + } if(clientRunning) { closeAllFilesBeingWritten(false); clientRunning = false; @@ -2474,4 +2519,8 @@ public CachingStrategy getDefaultReadCachingStrategy() { public CachingStrategy getDefaultWriteCachingStrategy() { return defaultWriteCachingStrategy; } + + ClientMmapManager getMmapManager() { + return mmapManager; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index f392df8d9ed..7d0fee4d11a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -373,6 +373,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final int DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_DEFAULT = 1024 * 1024; public static final String DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC = "dfs.client.domain.socket.data.traffic"; public static final boolean DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT = false; + public static final String DFS_CLIENT_MMAP_CACHE_SIZE = "dfs.client.mmap.cache.size"; + public static final int DFS_CLIENT_MMAP_CACHE_SIZE_DEFAULT = 1024; + public static final String DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS = "dfs.client.mmap.cache.timeout.ms"; + public static final long DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS_DEFAULT = 15 * 60 * 1000; + public static final String DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT = "dfs.client.mmap.cache.timeout.ms"; + public static final int DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT_DEFAULT = 4; // property for fsimage compression public static final String DFS_IMAGE_COMPRESS_KEY = "dfs.image.compress"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java index 4131ffa4426..06b3b68b2d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java @@ -41,6 +41,7 @@ import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.FSInputStream; import 
org.apache.hadoop.fs.UnresolvedLinkException; +import org.apache.hadoop.fs.ZeroCopyCursor; import org.apache.hadoop.hdfs.net.DomainPeer; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.net.TcpPeerServer; @@ -92,12 +93,14 @@ public ReadStatistics() { this.totalBytesRead = 0; this.totalLocalBytesRead = 0; this.totalShortCircuitBytesRead = 0; + this.totalZeroCopyBytesRead = 0; } public ReadStatistics(ReadStatistics rhs) { this.totalBytesRead = rhs.getTotalBytesRead(); this.totalLocalBytesRead = rhs.getTotalLocalBytesRead(); this.totalShortCircuitBytesRead = rhs.getTotalShortCircuitBytesRead(); + this.totalZeroCopyBytesRead = rhs.getTotalZeroCopyBytesRead(); } /** @@ -123,6 +126,13 @@ public long getTotalLocalBytesRead() { public long getTotalShortCircuitBytesRead() { return totalShortCircuitBytesRead; } + + /** + * @return The total number of zero-copy bytes read. + */ + public long getTotalZeroCopyBytesRead() { + return totalZeroCopyBytesRead; + } /** * @return The total number of bytes read which were not local. @@ -145,12 +155,21 @@ void addShortCircuitBytes(long amt) { this.totalLocalBytesRead += amt; this.totalShortCircuitBytesRead += amt; } + + void addZeroCopyBytes(long amt) { + this.totalBytesRead += amt; + this.totalLocalBytesRead += amt; + this.totalShortCircuitBytesRead += amt; + this.totalZeroCopyBytesRead += amt; + } private long totalBytesRead; private long totalLocalBytesRead; private long totalShortCircuitBytesRead; + + private long totalZeroCopyBytesRead; } private final FileInputStreamCache fileInputStreamCache; @@ -1393,4 +1412,67 @@ public synchronized void setDropBehind(Boolean dropBehind) this.cachingStrategy.setDropBehind(dropBehind); closeCurrentBlockReader(); } + + synchronized void readZeroCopy(HdfsZeroCopyCursor zcursor, int toRead) + throws IOException { + assert(toRead > 0); + if (((blockReader == null) || (blockEnd == -1)) && + (pos < getFileLength())) { + /* + * If we don't have a blockReader, or the one we have has no more bytes + * left to read, we call seekToBlockSource to get a new blockReader and + * recalculate blockEnd. Note that we assume we're not at EOF here + * (we check this above). + */ + if ((!seekToBlockSource(pos)) || (blockReader == null)) { + throw new IOException("failed to allocate new BlockReader " + + "at position " + pos); + } + } + long curPos = pos; + boolean canSkipChecksums = zcursor.getSkipChecksums(); + long blockLeft = blockEnd - curPos + 1; + if (zcursor.getAllowShortReads()) { + if (blockLeft < toRead) { + toRead = (int)blockLeft; + } + } + if (canSkipChecksums && (toRead <= blockLeft)) { + long blockStartInFile = currentLocatedBlock.getStartOffset(); + long blockPos = curPos - blockStartInFile; + if (blockReader.readZeroCopy(zcursor, + currentLocatedBlock, blockPos, toRead, + dfsClient.getMmapManager())) { + if (DFSClient.LOG.isDebugEnabled()) { + DFSClient.LOG.debug("readZeroCopy read " + toRead + " bytes from " + + "offset " + curPos + " via the zero-copy read path. " + + "blockEnd = " + blockEnd); + } + readStatistics.addZeroCopyBytes(toRead); + seek(pos + toRead); + return; + } + } + /* + * Slow path reads. + * + * readStatistics will be updated when we call back into this + * stream's read methods. + */ + long prevBlockEnd = blockEnd; + int slowReadAmount = zcursor.readViaSlowPath(toRead); + if (DFSClient.LOG.isDebugEnabled()) { + DFSClient.LOG.debug("readZeroCopy read " + slowReadAmount + " bytes " + + "from offset " + curPos + " via the fallback read path. 
" + + "prevBlockEnd = " + prevBlockEnd + ", blockEnd = " + blockEnd + + ", canSkipChecksums = " + canSkipChecksums); + } + } + + @Override + public ZeroCopyCursor createZeroCopyCursor() + throws IOException, UnsupportedOperationException { + return new HdfsZeroCopyCursor(this, + dfsClient.getConf().skipShortCircuitChecksums); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsZeroCopyCursor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsZeroCopyCursor.java new file mode 100644 index 00000000000..42b3eb7bcf1 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsZeroCopyCursor.java @@ -0,0 +1,148 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs; + +import java.io.EOFException; +import java.io.IOException; +import java.nio.ByteBuffer; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.ZeroCopyCursor; +import org.apache.hadoop.hdfs.client.ClientMmap; + +public class HdfsZeroCopyCursor implements ZeroCopyCursor { + public static final Log LOG = LogFactory.getLog(HdfsZeroCopyCursor.class); + private DFSInputStream stream; + private boolean skipChecksums; + private boolean allowShortReads; + private ClientMmap mmap; + private ByteBuffer fallbackBuffer; + private ByteBuffer readBuffer; + + HdfsZeroCopyCursor(DFSInputStream stream, boolean skipChecksums) { + this.stream = stream; + this.skipChecksums = skipChecksums; + this.allowShortReads = false; + this.mmap = null; + this.fallbackBuffer = null; + this.readBuffer = null; + } + + @Override + public void close() throws IOException { + stream = null; + if (mmap != null) { + mmap.unref(); + mmap = null; + } + fallbackBuffer = null; + readBuffer = null; + } + + @Override + public void setFallbackBuffer(ByteBuffer fallbackBuffer) { + this.fallbackBuffer = fallbackBuffer; + } + + @Override + public ByteBuffer getFallbackBuffer() { + return this.fallbackBuffer; + } + + @Override + public void setSkipChecksums(boolean skipChecksums) { + this.skipChecksums = skipChecksums; + } + + @Override + public boolean getSkipChecksums() { + return this.skipChecksums; + } + + @Override + public void setAllowShortReads(boolean allowShortReads) { + this.allowShortReads = allowShortReads; + } + + @Override + public boolean getAllowShortReads() { + return this.allowShortReads; + } + + @Override + public void read(int toRead) throws UnsupportedOperationException, + EOFException, IOException { + if (toRead < 0) { + throw new IllegalArgumentException("can't read " + toRead + " bytes."); + } + stream.readZeroCopy(this, toRead); + } + + @Override + public ByteBuffer getData() { + return readBuffer; + } + + int 
readViaSlowPath(int toRead) throws EOFException, IOException { + if (fallbackBuffer == null) { + throw new UnsupportedOperationException("unable to read via " + + "the fastpath, and there was no fallback buffer provided."); + } + fallbackBuffer.clear(); + fallbackBuffer.limit(toRead); // will throw if toRead is too large + + int totalRead = 0; + readBuffer = fallbackBuffer; + try { + while (toRead > 0) { + int nread = stream.read(fallbackBuffer); + if (nread < 0) { + break; + } + toRead -= nread; + totalRead += nread; + if (allowShortReads) { + break; + } + } + } finally { + fallbackBuffer.flip(); + } + if ((toRead > 0) && (!allowShortReads)) { + throw new EOFException("only read " + totalRead + " bytes out of " + + "a requested " + toRead + " before hitting EOF"); + } + return totalRead; + } + + void setMmap(ClientMmap mmap, ByteBuffer readBuffer) { + if (this.mmap != mmap) { + if (this.mmap != null) { + this.mmap.unref(); + } + } + this.mmap = mmap; + mmap.ref(); + this.readBuffer = readBuffer; + } + + ClientMmap getMmap() { + return mmap; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java index 9d69dc18182..eab35c97821 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java @@ -27,9 +27,11 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.FSInputChecker; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.client.ClientMmapManager; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; @@ -485,4 +487,11 @@ public boolean isLocal() { public boolean isShortCircuit() { return false; } + + @Override + public boolean readZeroCopy(HdfsZeroCopyCursor buffers, + LocatedBlock curBlock, long blockPos, int toRead, + ClientMmapManager mmapManager) { + return false; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java index 27726ff9fd1..8c2bdf3c844 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java @@ -29,9 +29,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdfs.client.ClientMmapManager; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; import org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver; @@ -40,7 +42,6 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; -import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; @@ -451,4 +452,11 @@ public boolean isLocal() { public boolean isShortCircuit() { return false; } + + @Override + public boolean readZeroCopy(HdfsZeroCopyCursor buffers, + LocatedBlock curBlock, long blockPos, int toRead, + ClientMmapManager manager) { + return false; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ClientMmap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ClientMmap.java new file mode 100644 index 00000000000..566c2b5457c --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ClientMmap.java @@ -0,0 +1,166 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.client; + +import java.io.FileInputStream; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; + +import java.io.IOException; +import java.lang.ref.WeakReference; +import java.nio.MappedByteBuffer; +import java.nio.channels.FileChannel.MapMode; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.google.common.annotations.VisibleForTesting; + +/** + * A memory-mapped region used by an HDFS client. + * + * This class includes a reference count and some other information used by + * ClientMmapManager to track and cache mmaps. + */ +@InterfaceAudience.Private +public class ClientMmap { + static final Log LOG = LogFactory.getLog(ClientMmap.class); + + /** + * A reference to the manager of this mmap. + * + * This is only a weak reference to help minimize the damange done by + * code which leaks references accidentally. + */ + private final WeakReference manager; + + /** + * The actual mapped memory region. + */ + private final MappedByteBuffer map; + + /** + * A reference count tracking how many threads are using this object. + */ + private final AtomicInteger refCount = new AtomicInteger(1); + + /** + * Block pertaining to this mmap + */ + private final ExtendedBlock block; + + /** + * The DataNode where this mmap came from. 
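+   * (Together with the block, this forms the key under which
+   * ClientMmapManager caches the mapping.)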
+ */ + private final DatanodeID datanodeID; + + /** + * The monotonic time when this mmap was last evictable. + */ + private long lastEvictableTimeNs; + + public static ClientMmap load(ClientMmapManager manager, FileInputStream in, + ExtendedBlock block, DatanodeID datanodeID) + throws IOException { + MappedByteBuffer map = + in.getChannel().map(MapMode.READ_ONLY, 0, + in.getChannel().size()); + return new ClientMmap(manager, map, block, datanodeID); + } + + private ClientMmap(ClientMmapManager manager, MappedByteBuffer map, + ExtendedBlock block, DatanodeID datanodeID) + throws IOException { + this.manager = new WeakReference(manager); + this.map = map; + this.block = block; + this.datanodeID = datanodeID; + this.lastEvictableTimeNs = 0; + } + + /** + * Decrement the reference count on this object. + * Should be called with the ClientMmapManager lock held. + */ + public void unref() { + int count = refCount.decrementAndGet(); + if (count < 0) { + throw new IllegalArgumentException("can't decrement the " + + "reference count on this ClientMmap lower than 0."); + } else if (count == 0) { + ClientMmapManager man = manager.get(); + if (man == null) { + unmap(); + } else { + man.makeEvictable(this); + } + } + } + + /** + * Increment the reference count on this object. + * + * @return The new reference count. + */ + public int ref() { + return refCount.getAndIncrement(); + } + + @VisibleForTesting + public ExtendedBlock getBlock() { + return block; + } + + DatanodeID getDatanodeID() { + return datanodeID; + } + + public MappedByteBuffer getMappedByteBuffer() { + return map; + } + + public void setLastEvictableTimeNs(long lastEvictableTimeNs) { + this.lastEvictableTimeNs = lastEvictableTimeNs; + } + + public long getLastEvictableTimeNs() { + return this.lastEvictableTimeNs; + } + + /** + * Unmap the memory region. + * + * There isn't any portable way to unmap a memory region in Java. + * So we use the sun.nio method here. + * Note that unmapping a memory region could cause crashes if code + * continues to reference the unmapped code. However, if we don't + * manually unmap the memory, we are dependent on the finalizer to + * do it, and we have no idea when the finalizer will run. + */ + void unmap() { + assert(refCount.get() == 0); + if (map instanceof sun.nio.ch.DirectBuffer) { + final sun.misc.Cleaner cleaner = + ((sun.nio.ch.DirectBuffer) map).cleaner(); + cleaner.clean(); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ClientMmapManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ClientMmapManager.java new file mode 100644 index 00000000000..7be519439e8 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ClientMmapManager.java @@ -0,0 +1,476 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.client; + +import java.io.Closeable; + +import org.apache.hadoop.classification.InterfaceAudience; + +import java.io.FileInputStream; +import java.io.IOException; +import java.lang.ref.WeakReference; +import java.util.Iterator; +import java.util.TreeMap; +import java.util.Map.Entry; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_SIZE; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_SIZE_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT_DEFAULT; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.io.IOUtils; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ComparisonChain; +import com.google.common.util.concurrent.ThreadFactoryBuilder; + +/** + * Tracks mmap instances used on an HDFS client. + * + * mmaps can be used concurrently by multiple threads at once. + * mmaps cannot be closed while they are in use. + * + * The cache is important for performance, because the first time an mmap is + * created, the page table entries (PTEs) are not yet set up. + * Even when reading data that is entirely resident in memory, reading an + * mmap the second time is faster. + */ +@InterfaceAudience.Private +public class ClientMmapManager implements Closeable { + public static final Log LOG = LogFactory.getLog(ClientMmapManager.class); + + private boolean closed = false; + + private final int cacheSize; + + private final long timeoutNs; + + private final int runsPerTimeout; + + private final Lock lock = new ReentrantLock(); + + /** + * Maps block, datanode_id to the client mmap object. + * If the ClientMmap is in the process of being loaded, + * {@link Waitable#await()} will block. + * + * Protected by the ClientMmapManager lock. + */ + private final TreeMap> mmaps = + new TreeMap>(); + + /** + * Maps the last use time to the client mmap object. + * We ensure that each last use time is unique by inserting a jitter of a + * nanosecond or two if necessary. + * + * Protected by the ClientMmapManager lock. + * ClientMmap objects that are in use are never evictable. + */ + private final TreeMap evictable = + new TreeMap(); + + private final ScheduledThreadPoolExecutor executor = + new ScheduledThreadPoolExecutor(1, new ThreadFactoryBuilder(). + setDaemon(true).setNameFormat("ClientMmapManager"). + build()); + + /** + * The CacheCleaner for this ClientMmapManager. We don't create this + * and schedule it until it becomes necessary. 
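+   * (It is created lazily by create() the first time an mmap is cached,
+   * and from then on runs runsPerTimeout times per timeout period.)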
+ */ + private CacheCleaner cacheCleaner; + + /** + * Factory method to create a ClientMmapManager from a Hadoop + * configuration. + */ + public static ClientMmapManager fromConf(Configuration conf) { + return new ClientMmapManager(conf.getInt(DFS_CLIENT_MMAP_CACHE_SIZE, + DFS_CLIENT_MMAP_CACHE_SIZE_DEFAULT), + conf.getLong(DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS, + DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS_DEFAULT), + conf.getInt(DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT, + DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT_DEFAULT)); + } + + public ClientMmapManager(int cacheSize, long timeoutMs, int runsPerTimeout) { + this.cacheSize = cacheSize; + this.timeoutNs = timeoutMs * 1000000; + this.runsPerTimeout = runsPerTimeout; + } + + long getTimeoutMs() { + return this.timeoutNs / 1000000; + } + + int getRunsPerTimeout() { + return this.runsPerTimeout; + } + + public String verifyConfigurationMatches(Configuration conf) { + StringBuilder bld = new StringBuilder(); + int cacheSize = conf.getInt(DFS_CLIENT_MMAP_CACHE_SIZE, + DFS_CLIENT_MMAP_CACHE_SIZE_DEFAULT); + if (this.cacheSize != cacheSize) { + bld.append("You specified a cache size of ").append(cacheSize). + append(", but the existing cache size is ").append(this.cacheSize). + append(". "); + } + long timeoutMs = conf.getLong(DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS, + DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS_DEFAULT); + if (getTimeoutMs() != timeoutMs) { + bld.append("You specified a cache timeout of ").append(timeoutMs). + append(" ms, but the existing cache timeout is "). + append(getTimeoutMs()).append("ms").append(". "); + } + int runsPerTimeout = conf.getInt( + DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT, + DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT_DEFAULT); + if (getRunsPerTimeout() != runsPerTimeout) { + bld.append("You specified ").append(runsPerTimeout). + append(" runs per timeout, but the existing runs per timeout is "). + append(getTimeoutMs()).append(". "); + } + return bld.toString(); + } + + private static class Waitable { + private T val; + private final Condition cond; + + public Waitable(Condition cond) { + this.val = null; + this.cond = cond; + } + + public T await() throws InterruptedException { + while (this.val == null) { + this.cond.await(); + } + return this.val; + } + + public void provide(T val) { + this.val = val; + this.cond.signalAll(); + } + } + + private static class Key implements Comparable { + private final ExtendedBlock block; + private final DatanodeID datanode; + + Key(ExtendedBlock block, DatanodeID datanode) { + this.block = block; + this.datanode = datanode; + } + + /** + * Compare two ClientMmap regions that we're storing. + * + * When we append to a block, we bump the genstamp. It is important to + * compare the genStamp here. That way, we will not return a shorter + * mmap than required. + */ + @Override + public int compareTo(Key o) { + return ComparisonChain.start(). + compare(block.getBlockId(), o.block.getBlockId()). + compare(block.getGenerationStamp(), o.block.getGenerationStamp()). + compare(block.getBlockPoolId(), o.block.getBlockPoolId()). + compare(datanode, o.datanode). + result(); + } + + @Override + public boolean equals(Object rhs) { + if (rhs == null) { + return false; + } + try { + Key o = (Key)rhs; + return (compareTo(o) == 0); + } catch (ClassCastException e) { + return false; + } + } + + @Override + public int hashCode() { + return block.hashCode() ^ datanode.hashCode(); + } + } + + /** + * Thread which handles expiring mmaps from the cache. 
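+   * (It holds only a weak reference to the manager, so an abandoned
+   * ClientMmapManager can still be garbage collected.)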
+ */ + private static class CacheCleaner implements Runnable, Closeable { + private WeakReference managerRef; + private ScheduledFuture future; + + CacheCleaner(ClientMmapManager manager) { + this.managerRef= new WeakReference(manager); + } + + @Override + public void run() { + ClientMmapManager manager = managerRef.get(); + if (manager == null) return; + long curTime = System.nanoTime(); + try { + manager.lock.lock(); + manager.evictStaleEntries(curTime); + } finally { + manager.lock.unlock(); + } + } + + void setFuture(ScheduledFuture future) { + this.future = future; + } + + @Override + public void close() throws IOException { + future.cancel(false); + } + } + + /** + * Evict entries which are older than curTime + timeoutNs from the cache. + * + * NOTE: you must call this function with the lock held. + */ + private void evictStaleEntries(long curTime) { + if (closed) { + return; + } + Iterator> iter = + evictable.entrySet().iterator(); + while (iter.hasNext()) { + Entry entry = iter.next(); + if (entry.getKey() + timeoutNs >= curTime) { + return; + } + ClientMmap mmap = entry.getValue(); + Key key = new Key(mmap.getBlock(), mmap.getDatanodeID()); + mmaps.remove(key); + iter.remove(); + mmap.unmap(); + } + } + + /** + * Evict one mmap object from the cache. + * + * NOTE: you must call this function with the lock held. + * + * @return True if an object was evicted; false if none + * could be evicted. + */ + private boolean evictOne() { + Entry entry = evictable.pollFirstEntry(); + if (entry == null) { + // We don't want to try creating another mmap region, because the + // cache is full. + return false; + } + ClientMmap evictedMmap = entry.getValue(); + Key evictedKey = new Key(evictedMmap.getBlock(), + evictedMmap.getDatanodeID()); + mmaps.remove(evictedKey); + evictedMmap.unmap(); + return true; + } + + /** + * Create a new mmap object. + * + * NOTE: you must call this function with the lock held. + * + * @param key The key which describes this mmap. + * @param in The input stream to use to create the mmap. + * @return The new mmap object, or null if there were + * insufficient resources. + * @throws IOException If there was an I/O error creating the mmap. + */ + private ClientMmap create(Key key, FileInputStream in) throws IOException { + if (mmaps.size() + 1 > cacheSize) { + if (!evictOne()) { + LOG.warn("mmap cache is full (with " + cacheSize + " elements) and " + + "nothing is evictable. Ignoring request for mmap with " + + "datanodeID=" + key.datanode + ", " + "block=" + key.block); + return null; + } + } + // Create the condition variable that other threads may wait on. + Waitable waitable = + new Waitable(lock.newCondition()); + mmaps.put(key, waitable); + // Load the entry + boolean success = false; + ClientMmap mmap = null; + try { + try { + lock.unlock(); + mmap = ClientMmap.load(this, in, key.block, key.datanode); + } finally { + lock.lock(); + } + if (cacheCleaner == null) { + cacheCleaner = new CacheCleaner(this); + ScheduledFuture future = + executor.scheduleAtFixedRate(cacheCleaner, + timeoutNs, timeoutNs / runsPerTimeout, TimeUnit.NANOSECONDS); + cacheCleaner.setFuture(future); + } + success = true; + } finally { + if (!success) { + LOG.warn("failed to create mmap for datanodeID=" + key.datanode + + ", " + "block=" + key.block); + mmaps.remove(key); + } + waitable.provide(mmap); + } + return mmap; + } + + /** + * Get or create an mmap region. + * + * @param node The DataNode that owns the block for this mmap region. 
+ * @param block The block ID, block pool ID, and generation stamp of + * the block we want to read. + * @param in An open file for this block. This stream is only used + * if we have to create a new mmap; if we use an + * existing one, it is ignored. + * + * @return The client mmap region. + */ + public ClientMmap fetch(DatanodeID datanodeID, ExtendedBlock block, + FileInputStream in) throws IOException, InterruptedException { + LOG.debug("fetching mmap with datanodeID=" + datanodeID + ", " + + "block=" + block); + Key key = new Key(block, datanodeID); + ClientMmap mmap = null; + try { + lock.lock(); + if (closed) { + throw new IOException("ClientMmapManager is closed."); + } + while (mmap == null) { + Waitable entry = mmaps.get(key); + if (entry == null) { + return create(key, in); + } + mmap = entry.await(); + } + if (mmap.ref() == 1) { + // When going from nobody using the mmap (ref = 0) to somebody + // using the mmap (ref = 1), we must make the mmap un-evictable. + evictable.remove(mmap.getLastEvictableTimeNs()); + } + } + finally { + lock.unlock(); + } + LOG.debug("reusing existing mmap with datanodeID=" + datanodeID + + ", " + "block=" + block); + return mmap; + } + + /** + * Make an mmap evictable. + * + * When an mmap is evictable, it may be removed from the cache if necessary. + * mmaps can only be evictable if nobody is using them. + * + * @param mmap The mmap to make evictable. + */ + void makeEvictable(ClientMmap mmap) { + try { + lock.lock(); + if (closed) { + // If this ClientMmapManager is closed, then don't bother with the + // cache; just close the mmap. + mmap.unmap(); + return; + } + long now = System.nanoTime(); + while (evictable.containsKey(now)) { + now++; + } + mmap.setLastEvictableTimeNs(now); + evictable.put(now, mmap); + } finally { + lock.unlock(); + } + } + + @Override + public void close() throws IOException { + try { + lock.lock(); + closed = true; + IOUtils.cleanup(LOG, cacheCleaner); + + // Unmap all the mmaps that nobody is using. + // The ones which are in use will be unmapped just as soon as people stop + // using them. + evictStaleEntries(Long.MAX_VALUE); + + executor.shutdown(); + } finally { + lock.unlock(); + } + } + + @VisibleForTesting + public interface ClientMmapVisitor { + void accept(ClientMmap mmap); + } + + @VisibleForTesting + public synchronized void visitMmaps(ClientMmapVisitor visitor) + throws InterruptedException { + for (Waitable entry : mmaps.values()) { + visitor.accept(entry.await()); + } + } + + public void visitEvictable(ClientMmapVisitor visitor) + throws InterruptedException { + for (ClientMmap mmap : evictable.values()) { + visitor.accept(mmap); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c new file mode 100644 index 00000000000..39761b5a03d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "expect.h" +#include "hdfs.h" + +#include +#include +#include +#include + +int expectFileStats(hdfsFile file, + uint64_t expectedTotalBytesRead, + uint64_t expectedTotalLocalBytesRead, + uint64_t expectedTotalShortCircuitBytesRead, + uint64_t expectedTotalZeroCopyBytesRead) +{ + struct hdfsReadStatistics *stats = NULL; + EXPECT_ZERO(hdfsFileGetReadStatistics(file, &stats)); + if (expectedTotalBytesRead != UINT64_MAX) { + EXPECT_INT64_EQ(expectedTotalBytesRead, stats->totalBytesRead); + } + if (expectedTotalLocalBytesRead != UINT64_MAX) { + EXPECT_INT64_EQ(expectedTotalLocalBytesRead, + stats->totalLocalBytesRead); + } + if (expectedTotalShortCircuitBytesRead != UINT64_MAX) { + EXPECT_INT64_EQ(expectedTotalShortCircuitBytesRead, + stats->totalShortCircuitBytesRead); + } + if (expectedTotalZeroCopyBytesRead != UINT64_MAX) { + EXPECT_INT64_EQ(expectedTotalZeroCopyBytesRead, + stats->totalZeroCopyBytesRead); + } + hdfsFileFreeReadStatistics(stats); + return 0; +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.h index 9d5d863881b..3dc777127dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.h +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.h @@ -19,16 +19,19 @@ #ifndef LIBHDFS_NATIVE_TESTS_EXPECT_H #define LIBHDFS_NATIVE_TESTS_EXPECT_H +#include #include +struct hdfsFile_internal; + #define EXPECT_ZERO(x) \ do { \ int __my_ret__ = x; \ if (__my_ret__) { \ int __my_errno__ = errno; \ - fprintf(stderr, "TEST_ERROR: failed on line %d with return " \ + fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \ "code %d (errno: %d): got nonzero from %s\n", \ - __LINE__, __my_ret__, __my_errno__, #x); \ + __FILE__, __LINE__, __my_ret__, __my_errno__, #x); \ return __my_ret__; \ } \ } while (0); @@ -38,9 +41,9 @@ void* __my_ret__ = x; \ int __my_errno__ = errno; \ if (__my_ret__ != NULL) { \ - fprintf(stderr, "TEST_ERROR: failed on line %d (errno: %d): " \ + fprintf(stderr, "TEST_ERROR: failed on %s:%d (errno: %d): " \ "got non-NULL value %p from %s\n", \ - __LINE__, __my_errno__, __my_ret__, #x); \ + __FILE__, __LINE__, __my_errno__, __my_ret__, #x); \ return -1; \ } \ } while (0); @@ -50,8 +53,8 @@ void* __my_ret__ = x; \ int __my_errno__ = errno; \ if (__my_ret__ == NULL) { \ - fprintf(stderr, "TEST_ERROR: failed on line %d (errno: %d): " \ - "got NULL from %s\n", __LINE__, __my_errno__, #x); \ + fprintf(stderr, "TEST_ERROR: failed on %s:%d (errno: %d): " \ + "got NULL from %s\n", __FILE__, __LINE__, __my_errno__, #x); \ return -1; \ } \ } while (0); @@ -61,15 +64,16 @@ int __my_ret__ = x; \ int __my_errno__ = errno; \ if (__my_ret__ != -1) { \ - fprintf(stderr, "TEST_ERROR: failed on line %d with return " \ - "code %d (errno: %d): expected -1 from %s\n", __LINE__, \ + fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \ + "code %d (errno: %d): expected -1 from %s\n", \ + __FILE__, __LINE__, \ __my_ret__, __my_errno__, #x); \ return -1; \ } \ if (__my_errno__ != e) { \ - fprintf(stderr, "TEST_ERROR: failed on line %d 
with return " \ + fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \ "code %d (errno: %d): expected errno = %d from %s\n", \ - __LINE__, __my_ret__, __my_errno__, e, #x); \ + __FILE__, __LINE__, __my_ret__, __my_errno__, e, #x); \ return -1; \ } \ } while (0); @@ -79,9 +83,9 @@ int __my_ret__ = x; \ int __my_errno__ = errno; \ if (!__my_ret__) { \ - fprintf(stderr, "TEST_ERROR: failed on line %d with return " \ - "code %d (errno: %d): got zero from %s\n", __LINE__, \ - __my_ret__, __my_errno__, #x); \ + fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \ + "code %d (errno: %d): got zero from %s\n", __FILE__, __LINE__, \ + __my_ret__, __my_errno__, #x); \ return -1; \ } \ } while (0); @@ -91,9 +95,9 @@ int __my_ret__ = x; \ int __my_errno__ = errno; \ if (__my_ret__ < 0) { \ - fprintf(stderr, "TEST_ERROR: failed on line %d with return " \ + fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \ "code %d (errno: %d): got negative return from %s\n", \ - __LINE__, __my_ret__, __my_errno__, #x); \ + __FILE__, __LINE__, __my_ret__, __my_errno__, #x); \ return __my_ret__; \ } \ } while (0); @@ -103,9 +107,21 @@ int __my_ret__ = y; \ int __my_errno__ = errno; \ if (__my_ret__ != (x)) { \ - fprintf(stderr, "TEST_ERROR: failed on line %d with return " \ + fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \ "code %d (errno: %d): expected %d\n", \ - __LINE__, __my_ret__, __my_errno__, (x)); \ + __FILE__, __LINE__, __my_ret__, __my_errno__, (x)); \ + return -1; \ + } \ + } while (0); + +#define EXPECT_INT64_EQ(x, y) \ + do { \ + int64_t __my_ret__ = y; \ + int __my_errno__ = errno; \ + if (__my_ret__ != (x)) { \ + fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \ + "value %"PRId64" (errno: %d): expected %"PRId64"\n", \ + __FILE__, __LINE__, __my_ret__, __my_errno__, (x)); \ return -1; \ } \ } while (0); @@ -117,4 +133,17 @@ ret = -errno; \ } while (ret == -EINTR); +/** + * Test that an HDFS file has the given statistics. + * + * Any parameter can be set to UINT64_MAX to avoid checking it. 
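+ *
+ * A typical use in a test looks like the sketch below; the byte counts
+ * shown are purely illustrative.
+ *
+ *   EXPECT_ZERO(expectFileStats(file, 4096, 4096, 4096, UINT64_MAX));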
+ * + * @return 0 on success; error code otherwise + */ +int expectFileStats(struct hdfsFile_internal *file, + uint64_t expectedTotalBytesRead, + uint64_t expectedTotalLocalBytesRead, + uint64_t expectedTotalShortCircuitBytesRead, + uint64_t expectedTotalZeroCopyBytesRead); + #endif diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c index 27824347692..cfffe385a8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c @@ -39,6 +39,7 @@ #define JAVA_NET_ISA "java/net/InetSocketAddress" #define JAVA_NET_URI "java/net/URI" #define JAVA_STRING "java/lang/String" +#define HADOOP_ZERO_COPY_CURSOR "org/apache/hadoop/fs/ZeroCopyCursor" #define JAVA_VOID "V" @@ -143,6 +144,15 @@ int hdfsFileGetReadStatistics(hdfsFile file, goto done; } s->totalShortCircuitBytesRead = jVal.j; + jthr = invokeMethod(env, &jVal, INSTANCE, readStats, + "org/apache/hadoop/hdfs/DFSInputStream$ReadStatistics", + "getTotalZeroCopyBytesRead", "()J"); + if (jthr) { + ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "hdfsFileGetReadStatistics: getTotalZeroCopyBytesRead failed"); + goto done; + } + s->totalZeroCopyBytesRead = jVal.j; *stats = s; s = NULL; ret = 0; @@ -183,6 +193,25 @@ void hdfsFileDisableDirectRead(hdfsFile file) file->flags &= ~HDFS_FILE_SUPPORTS_DIRECT_READ; } +int hdfsDisableDomainSocketSecurity(void) +{ + jthrowable jthr; + JNIEnv* env = getJNIEnv(); + if (env == NULL) { + errno = EINTERNAL; + return -1; + } + jthr = invokeMethod(env, NULL, STATIC, NULL, + "org/apache/hadoop/net/unix/DomainSocket", + "disableBindPathValidation", "()V"); + if (jthr) { + errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "DomainSocket#disableBindPathValidation"); + return -1; + } + return 0; +} + /** * hdfsJniEnv: A wrapper struct to be used as 'value' * while saving thread -> JNIEnv* mappings @@ -220,40 +249,6 @@ static jthrowable constructNewObjectOfPath(JNIEnv *env, const char *path, return NULL; } -/** - * Set a configuration value. 
- * - * @param env The JNI environment - * @param jConfiguration The configuration object to modify - * @param key The key to modify - * @param value The value to set the key to - * - * @return NULL on success; exception otherwise - */ -static jthrowable hadoopConfSetStr(JNIEnv *env, jobject jConfiguration, - const char *key, const char *value) -{ - jthrowable jthr; - jstring jkey = NULL, jvalue = NULL; - - jthr = newJavaStr(env, key, &jkey); - if (jthr) - goto done; - jthr = newJavaStr(env, value, &jvalue); - if (jthr) - goto done; - jthr = invokeMethod(env, NULL, INSTANCE, jConfiguration, - HADOOP_CONF, "set", JMETHOD2(JPARAM(JAVA_STRING), - JPARAM(JAVA_STRING), JAVA_VOID), - jkey, jvalue); - if (jthr) - goto done; -done: - destroyLocalReference(env, jkey); - destroyLocalReference(env, jvalue); - return jthr; -} - static jthrowable hadoopConfGetStr(JNIEnv *env, jobject jConfiguration, const char *key, char **val) { @@ -2108,6 +2103,248 @@ int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime) return 0; } +struct hadoopZeroCopyCursor* hadoopZeroCopyCursorAlloc(hdfsFile file) +{ + int ret; + jobject zcursor = NULL; + jvalue jVal; + jthrowable jthr; + JNIEnv* env; + + env = getJNIEnv(); + if (env == NULL) { + errno = EINTERNAL; + return NULL; + } + if (file->type != INPUT) { + ret = EINVAL; + goto done; + } + jthr = invokeMethod(env, &jVal, INSTANCE, (jobject)file->file, HADOOP_ISTRM, + "createZeroCopyCursor", "()L"HADOOP_ZERO_COPY_CURSOR";"); + if (jthr) { + ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "hadoopZeroCopyCursorAlloc: createZeroCopyCursor"); + goto done; + } + zcursor = (*env)->NewGlobalRef(env, jVal.l); + if (!zcursor) { + ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL, + "hadoopZeroCopyCursorAlloc: NewGlobalRef"); + } + ret = 0; +done: + if (ret) { + errno = ret; + } + return (struct hadoopZeroCopyCursor*)zcursor; +} + +int hadoopZeroCopyCursorSetFallbackBuffer(struct hadoopZeroCopyCursor* zcursor, + void *cbuf, uint32_t size) +{ + int ret; + jobject buffer = NULL; + jthrowable jthr; + JNIEnv* env; + + env = getJNIEnv(); + if (env == NULL) { + errno = EINTERNAL; + return -1; + } + buffer = (*env)->NewDirectByteBuffer(env, cbuf, size); + if (!buffer) { + ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL, + "hadoopZeroCopyCursorSetFallbackBuffer: NewDirectByteBuffer(" + "size=%"PRId32"):", size); + goto done; + } + jthr = invokeMethod(env, NULL, INSTANCE, (jobject)zcursor, + HADOOP_ZERO_COPY_CURSOR, "setFallbackBuffer", + "(Ljava/nio/ByteBuffer;)V", buffer); + if (jthr) { + ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "hadoopZeroCopyCursorSetFallbackBuffer: " + "FileSystem#setFallbackBuffer"); + goto done; + } + ret = 0; +done: + if (ret) { + (*env)->DeleteLocalRef(env, buffer); + errno = ret; + return -1; + } + return 0; +} + +int hadoopZeroCopyCursorSetSkipChecksums(struct hadoopZeroCopyCursor* zcursor, + int skipChecksums) +{ + JNIEnv* env; + jthrowable jthr; + jboolean shouldSkipChecksums = skipChecksums ? 
JNI_TRUE : JNI_FALSE; + + env = getJNIEnv(); + if (env == NULL) { + errno = EINTERNAL; + return -1; + } + jthr = invokeMethod(env, NULL, INSTANCE, (jobject)zcursor, + HADOOP_ZERO_COPY_CURSOR, "setSkipChecksums", "(Z)V", + shouldSkipChecksums); + if (jthr) { + errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "hadoopZeroCopyCursorSetSkipChecksums(): setSkipChecksums failed"); + return -1; + } + return 0; +} + +int hadoopZeroCopyCursorSetAllowShortReads( + struct hadoopZeroCopyCursor* zcursor, int allowShort) +{ + JNIEnv* env; + jthrowable jthr; + jboolean shouldAllowShort = allowShort ? JNI_TRUE : JNI_FALSE; + + env = getJNIEnv(); + if (env == NULL) { + errno = EINTERNAL; + return -1; + } + jthr = invokeMethod(env, NULL, INSTANCE, (jobject)zcursor, + HADOOP_ZERO_COPY_CURSOR, "setAllowShortReads", "(Z)V", + shouldAllowShort); + if (jthr) { + errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "hadoopZeroCopyCursorSetAllowShortReads(): setAllowShortReads " + "failed"); + return -1; + } + return 0; +} + +void hadoopZeroCopyCursorFree(struct hadoopZeroCopyCursor *zcursor) +{ + JNIEnv* env; + jthrowable jthr; + + env = getJNIEnv(); + if (env == NULL) { + return; + } + jthr = invokeMethod(env, NULL, INSTANCE, (jobject)zcursor, + HADOOP_ZERO_COPY_CURSOR, "close", "()V"); + if (jthr) { + printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "hadoopZeroCopyCursorFree(): close failed"); + } + (*env)->DeleteGlobalRef(env, (jobject)zcursor); +} + +/** + * Translate an exception from ZeroCopyCursor#read, translate it into a return + * code. + */ +static int translateZCRException(JNIEnv *env, jthrowable exc) +{ + int ret; + char *className = NULL; + jthrowable jthr = classNameOfObject(exc, env, &className); + + if (jthr) { + fprintf(stderr, "hadoopZeroCopyRead: unknown " + "exception from read().\n"); + destroyLocalReference(env, jthr); + destroyLocalReference(env, jthr); + ret = EIO; + goto done; + } + if (!strcmp(className, "java.io.EOFException")) { + ret = 0; // EOF + goto done; + } + if (!strcmp(className, "java.lang.UnsupportedOperationException")) { + ret = EPROTONOSUPPORT; + goto done; + } + ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "hadoopZeroCopyRead: ZeroCopyCursor#read failed"); +done: + free(className); + return ret; +} + +int32_t hadoopZeroCopyRead(struct hadoopZeroCopyCursor *zcursor, + int32_t toRead, const void **data) +{ + int32_t ret, nRead = -1; + JNIEnv* env; + jthrowable jthr; + jobject byteBuffer = NULL; + uint8_t *addr; + jint position; + jvalue jVal; + + env = getJNIEnv(); + if (env == NULL) { + errno = EINTERNAL; + return -1; + } + jthr = invokeMethod(env, NULL, INSTANCE, (jobject)zcursor, + HADOOP_ZERO_COPY_CURSOR, "read", "(I)V", toRead); + if (jthr) { + ret = translateZCRException(env, jthr); + goto done; + } + jthr = invokeMethod(env, &jVal, INSTANCE, (jobject)zcursor, + HADOOP_ZERO_COPY_CURSOR, "getData", + "()Ljava/nio/ByteBuffer;"); + if (jthr) { + ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "hadoopZeroCopyRead(toRead=%"PRId32"): getData failed", + toRead); + goto done; + } + byteBuffer = jVal.l; + addr = (*env)->GetDirectBufferAddress(env, byteBuffer); + if (!addr) { + fprintf(stderr, "hadoopZeroCopyRead(toRead=%"PRId32"): " + "failed to get direct buffer address.\n", toRead); + ret = EIO; + goto done; + } + jthr = invokeMethod(env, &jVal, INSTANCE, byteBuffer, + "java/nio/ByteBuffer", "position", "()I"); + if (jthr) { + ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "hadoopZeroCopyRead(toRead=%"PRId32"): ByteBuffer#position " 
+ "failed", toRead); + goto done; + } + position = jVal.i; + jthr = invokeMethod(env, &jVal, INSTANCE, byteBuffer, + "java/nio/ByteBuffer", "remaining", "()I"); + if (jthr) { + ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "hadoopZeroCopyRead(toRead=%"PRId32"): ByteBuffer#remaining " + "failed", toRead); + goto done; + } + ret = 0; + nRead = jVal.i; + *data = addr + position; +done: + (*env)->DeleteLocalRef(env, byteBuffer); + if (nRead == -1) { + errno = ret; + return -1; + } + return nRead; +} + char*** hdfsGetHosts(hdfsFS fs, const char* path, tOffset start, tOffset length) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h index 1871665955c..69fad082b69 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h @@ -85,6 +85,7 @@ extern "C" { uint64_t totalBytesRead; uint64_t totalLocalBytesRead; uint64_t totalShortCircuitBytesRead; + uint64_t totalZeroCopyBytesRead; }; /** @@ -680,7 +681,89 @@ extern "C" { * @return 0 on success else -1 */ int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime); - + + /** + * Create a zero-copy cursor object. + * + * @param file The file to use for zero-copy reads. + * + * @return The zero-copy cursor, or NULL + errno on failure. + */ + struct hadoopZeroCopyCursor* hadoopZeroCopyCursorAlloc(hdfsFile file); + + /** + * Set the fallback buffer which will be used by the zero copy object. + * + * You are responsible for ensuring that this buffer stays valid until you + * either set a different buffer by calling this function again, or free the + * zero-copy cursor. + * + * @param zcursor The zero-copy cursor. + * @param cbuf The buffer to use. + * @param size Size of the buffer. + * + * @return 0 on success. -1 on error. Errno will be set on + * error. + */ + int hadoopZeroCopyCursorSetFallbackBuffer( + struct hadoopZeroCopyCursor* zcursor, void *cbuf, uint32_t size); + + /** + * Set whether our cursor should skip checksums or not. + * + * @param zcursor The cursor + * @param skipChecksums Nonzero to skip checksums. + * + * @return -1 on error, 0 otherwise. + */ + int hadoopZeroCopyCursorSetSkipChecksums( + struct hadoopZeroCopyCursor* zcursor, int skipChecksums); + + /** + * Set whether our cursor should allow short reads to occur. + * Short reads will always occur if there is not enough data to read + * (i.e., at EOF), but normally we don't return them when reading other + * parts of the file. + * + * @param zcursor The cursor + * @param skipChecksums Nonzero to skip checksums. + * + * @return -1 on error, 0 otherwise. + */ + int hadoopZeroCopyCursorSetAllowShortReads( + struct hadoopZeroCopyCursor* zcursor, int allowShort); + + /** + * Free zero-copy cursor. + * + * This will dispose of the cursor allocated by hadoopZeroCopyCursorAlloc, as + * well as any memory map that we have created. You must be done with the + * data returned from hadoopZeroCopyRead before calling this. + * + * @param zcursor The zero-copy cursor. + */ + void hadoopZeroCopyCursorFree(struct hadoopZeroCopyCursor *zcursor); + + /* + * Perform a zero-copy read. + * + * @param zcursor The zero-copy cursor object. + * @param toRead The maximum amount to read. + * @param data (out param) on succesful return, a pointer to the + * data. This pointer will remain valid until the next + * call to hadoopZeroCopyRead, or until + * hadoopZeroCopyCursorFree is called on zcursor. 
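+     *
+     * A minimal call sequence, as a sketch (error handling omitted; "file"
+     * is assumed to be an hdfsFile that was opened for reading):
+     *
+     *   struct hadoopZeroCopyCursor *zc = hadoopZeroCopyCursorAlloc(file);
+     *   const void *data;
+     *   int32_t nRead = hadoopZeroCopyRead(zc, 4096, &data);
+     *   ... use nRead bytes starting at data ...
+     *   hadoopZeroCopyCursorFree(zc);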
+ * + * @return -2 if zero-copy is unavailable, and + * -1 if there was an error. errno will be the error. + * 0 if we hit end-of-file without reading anything. + * The positive number of bytes read otherwise. Short + * reads will happen only if EOF is reached. + * The amount read otherwise. + */ + int32_t hadoopZeroCopyRead(struct hadoopZeroCopyCursor *zcursor, + int32_t toRead, const void **data); + #ifdef __cplusplus } #endif diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs_test.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs_test.h index b3ff4f2a637..0eab9a68aea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs_test.h +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs_test.h @@ -48,6 +48,15 @@ extern "C" { * @param file The HDFS file */ void hdfsFileDisableDirectRead(struct hdfsFile_internal *file); + + /** + * Disable domain socket security checks. + * + * @param 0 if domain socket security was disabled; + * -1 if not. + */ + int hdfsDisableDomainSocketSecurity(void); + #ifdef __cplusplus } #endif diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c index c768c9c1d04..21ff9d9e0da 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c @@ -608,3 +608,42 @@ JNIEnv* getJNIEnv(void) return env; } +int javaObjectIsOfClass(JNIEnv *env, jobject obj, const char *name) +{ + jclass clazz; + int ret; + + clazz = (*env)->FindClass(env, name); + if (!clazz) { + printPendingExceptionAndFree(env, PRINT_EXC_ALL, + "javaObjectIsOfClass(%s)", name); + return -1; + } + ret = (*env)->IsInstanceOf(env, obj, clazz); + (*env)->DeleteLocalRef(env, clazz); + return ret == JNI_TRUE ? 1 : 0; +} + +jthrowable hadoopConfSetStr(JNIEnv *env, jobject jConfiguration, + const char *key, const char *value) +{ + jthrowable jthr; + jstring jkey = NULL, jvalue = NULL; + + jthr = newJavaStr(env, key, &jkey); + if (jthr) + goto done; + jthr = newJavaStr(env, value, &jvalue); + if (jthr) + goto done; + jthr = invokeMethod(env, NULL, INSTANCE, jConfiguration, + "org/apache/hadoop/conf/Configuration", "set", + "(Ljava/lang/String;Ljava/lang/String;)V", + jkey, jvalue); + if (jthr) + goto done; +done: + (*env)->DeleteLocalRef(env, jkey); + (*env)->DeleteLocalRef(env, jvalue); + return jthr; +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h index f37dea739fc..c2a7409a9c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h @@ -114,6 +114,32 @@ jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name); * */ JNIEnv* getJNIEnv(void); +/** + * Figure out if a Java object is an instance of a particular class. + * + * @param env The Java environment. + * @param obj The object to check. + * @param name The class name to check. + * + * @return -1 if we failed to find the referenced class name. + * 0 if the object is not of the given class. + * 1 if the object is of the given class. + */ +int javaObjectIsOfClass(JNIEnv *env, jobject obj, const char *name); + +/** + * Set a value in a configuration object. 
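+ * (This helper was previously a static function in hdfs.c; it is declared
+ * here so that native_mini_dfs.c can reuse it.)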
+ * + * @param env The JNI environment + * @param jConfiguration The configuration object to modify + * @param key The key to modify + * @param value The value to set the key to + * + * @return NULL on success; exception otherwise + */ +jthrowable hadoopConfSetStr(JNIEnv *env, jobject jConfiguration, + const char *key, const char *value); + #endif /*LIBHDFS_JNI_HELPER_H*/ /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c index a1476ca18f0..77e2f0766d9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c @@ -17,14 +17,19 @@ */ #include "exception.h" +#include "hdfs.h" +#include "hdfs_test.h" #include "jni_helper.h" #include "native_mini_dfs.h" #include #include +#include #include #include #include +#include +#include #define MINIDFS_CLUSTER_BUILDER "org/apache/hadoop/hdfs/MiniDFSCluster$Builder" #define MINIDFS_CLUSTER "org/apache/hadoop/hdfs/MiniDFSCluster" @@ -39,8 +44,44 @@ struct NativeMiniDfsCluster { * The NativeMiniDfsCluster object */ jobject obj; + + /** + * Path to the domain socket, or the empty string if there is none. + */ + char domainSocketPath[PATH_MAX]; }; +static jthrowable nmdConfigureShortCircuit(JNIEnv *env, + struct NativeMiniDfsCluster *cl, jobject cobj) +{ + jthrowable jthr; + char *tmpDir; + + int ret = hdfsDisableDomainSocketSecurity(); + if (ret) { + return newRuntimeError(env, "failed to disable hdfs domain " + "socket security: error %d", ret); + } + jthr = hadoopConfSetStr(env, cobj, "dfs.client.read.shortcircuit", "true"); + if (jthr) { + return jthr; + } + tmpDir = getenv("TMPDIR"); + if (!tmpDir) { + tmpDir = "/tmp"; + } + snprintf(cl->domainSocketPath, PATH_MAX, "%s/native_mini_dfs.sock.%d.%d", + tmpDir, getpid(), rand()); + snprintf(cl->domainSocketPath, PATH_MAX, "%s/native_mini_dfs.sock.%d.%d", + tmpDir, getpid(), rand()); + jthr = hadoopConfSetStr(env, cobj, "dfs.domain.socket.path", + cl->domainSocketPath); + if (jthr) { + return jthr; + } + return NULL; +} + struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf) { struct NativeMiniDfsCluster* cl = NULL; @@ -81,6 +122,28 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf) goto error; } } + if (jthr) { + printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "nmdCreate: Configuration::setBoolean"); + goto error; + } + // Disable 'minimum block size' -- it's annoying in tests. 
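+    // (Tests such as test_libhdfs_zerocopy create files with very small
+    // blocks, e.g. 4096 bytes, which a nonzero minimum would reject.)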
+ (*env)->DeleteLocalRef(env, jconfStr); + jconfStr = NULL; + jthr = newJavaStr(env, "dfs.namenode.fs-limits.min-block-size", &jconfStr); + if (jthr) { + printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "nmdCreate: new String"); + goto error; + } + jthr = invokeMethod(env, NULL, INSTANCE, cobj, HADOOP_CONF, + "setLong", "(Ljava/lang/String;J)V", jconfStr, 0LL); + if (jthr) { + printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "nmdCreate: Configuration::setLong"); + goto error; + } + // Creae MiniDFSCluster object jthr = constructNewObjectOfClass(env, &bld, MINIDFS_CLUSTER_BUILDER, "(L"HADOOP_CONF";)V", cobj); if (jthr) { @@ -88,6 +151,14 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf) "nmdCreate: NativeMiniDfsCluster#Builder#Builder"); goto error; } + if (conf->configureShortCircuit) { + jthr = nmdConfigureShortCircuit(env, cl, cobj); + if (jthr) { + printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "nmdCreate: nmdConfigureShortCircuit error"); + goto error; + } + } jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER, "format", "(Z)L" MINIDFS_CLUSTER_BUILDER ";", conf->doFormat); if (jthr) { @@ -272,3 +343,29 @@ error_dlr_nn: return ret; } + +int nmdConfigureHdfsBuilder(struct NativeMiniDfsCluster *cl, + struct hdfsBuilder *bld) +{ + int port, ret; + + hdfsBuilderSetNameNode(bld, "localhost"); + port = nmdGetNameNodePort(cl); + if (port < 0) { + fprintf(stderr, "nmdGetNameNodePort failed with error %d\n", -port); + return EIO; + } + hdfsBuilderSetNameNodePort(bld, port); + if (cl->domainSocketPath[0]) { + ret = hdfsBuilderConfSetStr(bld, "dfs.client.read.shortcircuit", "true"); + if (ret) { + return ret; + } + ret = hdfsBuilderConfSetStr(bld, "dfs.domain.socket.path", + cl->domainSocketPath); + if (ret) { + return ret; + } + } + return 0; +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h index 6bf29905ad9..41d69c2966a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h @@ -21,6 +21,7 @@ #include /* for jboolean */ +struct hdfsBuilder; struct NativeMiniDfsCluster; /** @@ -28,17 +29,24 @@ struct NativeMiniDfsCluster; */ struct NativeMiniDfsConf { /** - * Nonzero if the cluster should be formatted prior to startup + * Nonzero if the cluster should be formatted prior to startup. */ jboolean doFormat; + /** * Whether or not to enable webhdfs in MiniDfsCluster */ jboolean webhdfsEnabled; + /** * The http port of the namenode in MiniDfsCluster */ jint namenodeHttpPort; + + /** + * Nonzero if we should configure short circuit. + */ + jboolean configureShortCircuit; }; /** @@ -84,7 +92,7 @@ void nmdFree(struct NativeMiniDfsCluster* cl); * * @return the port, or a negative error code */ -int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl); +int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl); /** * Get the http address that's in use by the given (non-HA) nativeMiniDfs @@ -101,4 +109,14 @@ int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl); int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl, int *port, const char **hostName); +/** + * Configure the HDFS builder appropriately to connect to this cluster. 
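+ * (This sets the NameNode host and port on the builder and, when the
+ * cluster was created with configureShortCircuit, also copies over the
+ * short-circuit read and domain socket settings.)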
+ * + * @param bld The hdfs builder + * + * @return the port, or a negative error code + */ +int nmdConfigureHdfsBuilder(struct NativeMiniDfsCluster *cl, + struct hdfsBuilder *bld); + #endif diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c new file mode 100644 index 00000000000..0b34540ba95 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c @@ -0,0 +1,225 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "expect.h" +#include "hdfs.h" +#include "native_mini_dfs.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TO_STR_HELPER(X) #X +#define TO_STR(X) TO_STR_HELPER(X) + +#define TEST_FILE_NAME_LENGTH 128 +#define TEST_ZEROCOPY_FULL_BLOCK_SIZE 4096 +#define TEST_ZEROCOPY_LAST_BLOCK_SIZE 3215 +#define TEST_ZEROCOPY_NUM_BLOCKS 6 +#define SMALL_READ_LEN 16 + +#define ZC_BUF_LEN 32768 + +static uint8_t *getZeroCopyBlockData(int blockIdx) +{ + uint8_t *buf = malloc(TEST_ZEROCOPY_FULL_BLOCK_SIZE); + int i; + if (!buf) { + fprintf(stderr, "malloc(%d) failed\n", TEST_ZEROCOPY_FULL_BLOCK_SIZE); + exit(1); + } + for (i = 0; i < TEST_ZEROCOPY_FULL_BLOCK_SIZE; i++) { + buf[i] = blockIdx + (i % 17); + } + return buf; +} + +static int getZeroCopyBlockLen(int blockIdx) +{ + if (blockIdx >= TEST_ZEROCOPY_NUM_BLOCKS) { + return 0; + } else if (blockIdx == (TEST_ZEROCOPY_NUM_BLOCKS - 1)) { + return TEST_ZEROCOPY_LAST_BLOCK_SIZE; + } else { + return TEST_ZEROCOPY_FULL_BLOCK_SIZE; + } +} + +static void printBuf(const uint8_t *buf, size_t len) __attribute__((unused)); + +static void printBuf(const uint8_t *buf, size_t len) +{ + size_t i; + + for (i = 0; i < len; i++) { + fprintf(stderr, "%02x", buf[i]); + } + fprintf(stderr, "\n"); +} + +static int doTestZeroCopyReads(hdfsFS fs, const char *fileName) +{ + hdfsFile file = NULL; + struct hadoopZeroCopyCursor *zcursor = NULL; + uint8_t *backingBuffer = NULL, *block; + const void *zcPtr; + + file = hdfsOpenFile(fs, fileName, O_RDONLY, 0, 0, 0); + EXPECT_NONNULL(file); + zcursor = hadoopZeroCopyCursorAlloc(file); + EXPECT_NONNULL(zcursor); + /* haven't read anything yet */ + EXPECT_ZERO(expectFileStats(file, 0LL, 0LL, 0LL, 0LL)); + block = getZeroCopyBlockData(0); + EXPECT_NONNULL(block); + /* first read is half of a block. 
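+     * It stays within the first block, so it is expected to be served
+     * entirely by the zero-copy read path.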
*/ + EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2, + hadoopZeroCopyRead(zcursor, + TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2, &zcPtr)); + EXPECT_ZERO(memcmp(zcPtr, block, TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2)); + /* read the next half of the block */ + EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2, + hadoopZeroCopyRead(zcursor, + TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2, &zcPtr)); + EXPECT_ZERO(memcmp(zcPtr, block + (TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2), + TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2)); + free(block); + EXPECT_ZERO(expectFileStats(file, TEST_ZEROCOPY_FULL_BLOCK_SIZE, + TEST_ZEROCOPY_FULL_BLOCK_SIZE, + TEST_ZEROCOPY_FULL_BLOCK_SIZE, + TEST_ZEROCOPY_FULL_BLOCK_SIZE)); + /* Now let's read just a few bytes. */ + EXPECT_INT_EQ(SMALL_READ_LEN, + hadoopZeroCopyRead(zcursor, SMALL_READ_LEN, &zcPtr)); + block = getZeroCopyBlockData(1); + EXPECT_NONNULL(block); + EXPECT_ZERO(memcmp(block, zcPtr, SMALL_READ_LEN)); + EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN, + hdfsTell(fs, file)); + EXPECT_ZERO(expectFileStats(file, + TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN, + TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN, + TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN, + TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN)); + + /* Try to read a full block's worth of data. This will cross the block + * boundary, which means we have to fall back to non-zero-copy reads. + * However, because we don't have a backing buffer, the fallback will fail + * with EPROTONOSUPPORT. */ + EXPECT_INT_EQ(-1, + hadoopZeroCopyRead(zcursor, TEST_ZEROCOPY_FULL_BLOCK_SIZE, &zcPtr)); + EXPECT_INT_EQ(EPROTONOSUPPORT, errno); + + /* Now set a backing buffer and try again. It should succeed this time. */ + backingBuffer = malloc(ZC_BUF_LEN); + EXPECT_NONNULL(backingBuffer); + EXPECT_ZERO(hadoopZeroCopyCursorSetFallbackBuffer(zcursor, + backingBuffer, ZC_BUF_LEN)); + EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE, + hadoopZeroCopyRead(zcursor, TEST_ZEROCOPY_FULL_BLOCK_SIZE, &zcPtr)); + EXPECT_ZERO(expectFileStats(file, + (2 * TEST_ZEROCOPY_FULL_BLOCK_SIZE) + SMALL_READ_LEN, + (2 * TEST_ZEROCOPY_FULL_BLOCK_SIZE) + SMALL_READ_LEN, + (2 * TEST_ZEROCOPY_FULL_BLOCK_SIZE) + SMALL_READ_LEN, + TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN)); + EXPECT_ZERO(memcmp(block + SMALL_READ_LEN, zcPtr, + TEST_ZEROCOPY_FULL_BLOCK_SIZE - SMALL_READ_LEN)); + free(block); + block = getZeroCopyBlockData(2); + EXPECT_NONNULL(block); + EXPECT_ZERO(memcmp(block, zcPtr + + (TEST_ZEROCOPY_FULL_BLOCK_SIZE - SMALL_READ_LEN), SMALL_READ_LEN)); + free(block); + hadoopZeroCopyCursorFree(zcursor); + EXPECT_ZERO(hdfsCloseFile(fs, file)); + free(backingBuffer); + return 0; +} + +static int createZeroCopyTestFile(hdfsFS fs, char *testFileName, + size_t testFileNameLen) +{ + int blockIdx, blockLen; + hdfsFile file; + uint8_t *data; + + snprintf(testFileName, testFileNameLen, "/zeroCopyTestFile.%d.%d", + getpid(), rand()); + file = hdfsOpenFile(fs, testFileName, O_WRONLY, 0, 1, + TEST_ZEROCOPY_FULL_BLOCK_SIZE); + EXPECT_NONNULL(file); + for (blockIdx = 0; blockIdx < TEST_ZEROCOPY_NUM_BLOCKS; blockIdx++) { + blockLen = getZeroCopyBlockLen(blockIdx); + data = getZeroCopyBlockData(blockIdx); + EXPECT_NONNULL(data); + EXPECT_INT_EQ(blockLen, hdfsWrite(fs, file, data, blockLen)); + } + EXPECT_ZERO(hdfsCloseFile(fs, file)); + return 0; +} + +/** + * Test that we can write a file with libhdfs and then read it back + */ +int main(void) +{ + int port; + struct NativeMiniDfsConf conf = { + .doFormat = 1, + .configureShortCircuit = 1, + }; + char 
testFileName[TEST_FILE_NAME_LENGTH]; + hdfsFS fs; + struct NativeMiniDfsCluster* cl; + struct hdfsBuilder *bld; + + cl = nmdCreate(&conf); + EXPECT_NONNULL(cl); + EXPECT_ZERO(nmdWaitClusterUp(cl)); + port = nmdGetNameNodePort(cl); + if (port < 0) { + fprintf(stderr, "TEST_ERROR: test_zerocopy: " + "nmdGetNameNodePort returned error %d\n", port); + return EXIT_FAILURE; + } + bld = hdfsNewBuilder(); + EXPECT_NONNULL(bld); + EXPECT_ZERO(nmdConfigureHdfsBuilder(cl, bld)); + hdfsBuilderSetForceNewInstance(bld); + hdfsBuilderConfSetStr(bld, "dfs.block.size", + TO_STR(TEST_ZEROCOPY_FULL_BLOCK_SIZE)); + /* ensure that we'll always get our mmaps */ + hdfsBuilderConfSetStr(bld, "dfs.client.read.shortcircuit.skip.checksum", + "true"); + fs = hdfsBuilderConnect(bld); + EXPECT_NONNULL(fs); + EXPECT_ZERO(createZeroCopyTestFile(fs, testFileName, + TEST_FILE_NAME_LENGTH)); + EXPECT_ZERO(doTestZeroCopyReads(fs, testFileName)); + EXPECT_ZERO(hdfsDisconnect(fs)); + EXPECT_ZERO(nmdShutdown(cl)); + nmdFree(cl); + fprintf(stderr, "TEST_SUCCESS\n"); + return EXIT_SUCCESS; +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 06eca701264..a2dcf8c347c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -1391,4 +1391,32 @@ linearly increases. + + + dfs.client.mmap.cache.size + 1024 + + When zero-copy reads are used, the DFSClient keeps a cache of recently used + memory mapped regions. This parameter controls the maximum number of + entries that we will keep in that cache. + + If this is set to 0, we will not allow mmap. + + The larger this number is, the more file descriptors we will potentially + use for memory-mapped files. mmaped files also use virtual address space. + You may need to increase your ulimit virtual address space limits before + increasing the client mmap cache size. + + + + + dfs.client.mmap.cache.timeout.ms + 900000 + + The minimum length of time that we will keep an mmap entry in the cache + between uses. If an entry is in the cache longer than this, and nobody + uses it, it will be removed by a background thread. 
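
These two cache settings can also be applied programmatically on the client; a minimal sketch (the class name, the cache size of 128, and the 60-second timeout are illustrative values, not defaults), mirroring how the tests later in this series tune them through DFSConfigKeys:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.DFSConfigKeys;
  import org.apache.hadoop.hdfs.HdfsConfiguration;

  public class MmapCacheConfigExample {
    public static Configuration newConf() {
      HdfsConfiguration conf = new HdfsConfiguration();
      // Keep at most 128 mmap'ed regions cached per client; 0 disables mmap.
      conf.setInt(DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_SIZE, 128);
      // Evict unused entries after 60 seconds rather than the 900000 ms default.
      conf.setLong(DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS, 60 * 1000L);
      return conf;
    }
  }
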
+ + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java index 057b79fd114..5015a56a42a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java @@ -23,24 +23,44 @@ import java.io.IOException; import java.io.RandomAccessFile; import java.nio.ByteBuffer; +import java.util.Arrays; import java.util.concurrent.TimeoutException; -import org.apache.hadoop.hdfs.DFSInputStream.ReadStatistics; +import org.apache.commons.lang.SystemUtils; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.ZeroCopyCursor; +import org.apache.hadoop.hdfs.client.ClientMmap; +import org.apache.hadoop.hdfs.client.ClientMmapManager; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.unix.DomainSocket; import org.apache.hadoop.net.unix.TemporarySocketDirectory; +import org.apache.hadoop.io.nativeio.NativeIO; +import org.junit.AfterClass; import org.junit.Assert; import org.junit.Assume; +import org.junit.BeforeClass; import org.junit.Test; public class TestBlockReaderLocal { + private static TemporarySocketDirectory sockDir; + + @BeforeClass + public static void init() { + sockDir = new TemporarySocketDirectory(); + DomainSocket.disableBindPathValidation(); + } + + @AfterClass + public static void shutdown() throws IOException { + sockDir.close(); + } + public static void assertArrayRegionsEqual(byte []buf1, int off1, byte []buf2, int off2, int len) { for (int i = 0; i < len; i++) { @@ -100,10 +120,11 @@ public void runBlockReaderLocalTest(BlockReaderLocalTest test, FSDataInputStream fsIn = null; byte original[] = new byte[BlockReaderLocalTest.TEST_LENGTH]; + FileSystem fs = null; try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); - FileSystem fs = cluster.getFileSystem(); + fs = cluster.getFileSystem(); DFSTestUtil.createFile(fs, TEST_PATH, BlockReaderLocalTest.TEST_LENGTH, (short)1, RANDOM_SEED); try { @@ -138,6 +159,7 @@ public void runBlockReaderLocalTest(BlockReaderLocalTest test, test.doTest(blockReaderLocal, original); } finally { if (fsIn != null) fsIn.close(); + if (fs != null) fs.close(); if (cluster != null) cluster.shutdown(); if (dataIn != null) dataIn.close(); if (checkIn != null) checkIn.close(); @@ -382,10 +404,11 @@ private void testStatistics(boolean isShortCircuit) throws Exception { final long RANDOM_SEED = 4567L; FSDataInputStream fsIn = null; byte original[] = new byte[BlockReaderLocalTest.TEST_LENGTH]; + FileSystem fs = null; try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); - FileSystem fs = cluster.getFileSystem(); + fs = cluster.getFileSystem(); DFSTestUtil.createFile(fs, TEST_PATH, BlockReaderLocalTest.TEST_LENGTH, (short)1, RANDOM_SEED); try { @@ -417,8 +440,327 @@ private void testStatistics(boolean isShortCircuit) throws Exception { } finally { DFSInputStream.tcpReadsDisabledForTesting = false; if (fsIn != null) fsIn.close(); + if (fs != null) fs.close(); if (cluster != null) 
cluster.shutdown(); if (sockDir != null) sockDir.close(); } } + + private static byte[] byteBufferToArray(ByteBuffer buf) { + byte resultArray[] = new byte[buf.remaining()]; + buf.get(resultArray); + return resultArray; + } + + public static HdfsConfiguration initZeroCopyTest() { + Assume.assumeTrue(NativeIO.isAvailable()); + Assume.assumeTrue(SystemUtils.IS_OS_UNIX); + HdfsConfiguration conf = new HdfsConfiguration(); + conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true); + sockDir = new TemporarySocketDirectory(); + conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096); + conf.setInt(DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_SIZE, 3); + conf.setLong(DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS, 100); + conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, + new File(sockDir.getDir(), + "TestRequestMmapAccess._PORT.sock").getAbsolutePath()); + conf.setBoolean(DFSConfigKeys. + DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, true); + return conf; + } + + @Test + public void testZeroCopyReads() throws Exception { + HdfsConfiguration conf = initZeroCopyTest(); + MiniDFSCluster cluster = null; + final Path TEST_PATH = new Path("/a"); + FSDataInputStream fsIn = null; + ZeroCopyCursor zcursor = null; + + FileSystem fs = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + DFSTestUtil.createFile(fs, TEST_PATH, + BlockReaderLocalTest.TEST_LENGTH, (short)1, 7567L); + try { + DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1); + } catch (InterruptedException e) { + Assert.fail("unexpected InterruptedException during " + + "waitReplication: " + e); + } catch (TimeoutException e) { + Assert.fail("unexpected TimeoutException during " + + "waitReplication: " + e); + } + fsIn = fs.open(TEST_PATH); + byte original[] = new byte[BlockReaderLocalTest.TEST_LENGTH]; + IOUtils.readFully(fsIn, original, 0, + BlockReaderLocalTest.TEST_LENGTH); + fsIn.close(); + fsIn = fs.open(TEST_PATH); + zcursor = fsIn.createZeroCopyCursor(); + zcursor.setFallbackBuffer(ByteBuffer. 
+ allocateDirect(1024 * 1024 * 4)); + HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn; + zcursor.read(4096); + ByteBuffer result = zcursor.getData(); + Assert.assertEquals(4096, result.remaining()); + Assert.assertEquals(4096, + dfsIn.getReadStatistics().getTotalBytesRead()); + Assert.assertEquals(4096, + dfsIn.getReadStatistics().getTotalZeroCopyBytesRead()); + Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 4096), + byteBufferToArray(result)); + } finally { + if (zcursor != null) zcursor.close(); + if (fsIn != null) fsIn.close(); + if (fs != null) fs.close(); + if (cluster != null) cluster.shutdown(); + } + } + + @Test + public void testShortZeroCopyReads() throws Exception { + HdfsConfiguration conf = initZeroCopyTest(); + MiniDFSCluster cluster = null; + final Path TEST_PATH = new Path("/a"); + FSDataInputStream fsIn = null; + ZeroCopyCursor zcursor = null; + + FileSystem fs = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + DFSTestUtil.createFile(fs, TEST_PATH, + BlockReaderLocalTest.TEST_LENGTH, (short)1, 7567L); + try { + DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1); + } catch (InterruptedException e) { + Assert.fail("unexpected InterruptedException during " + + "waitReplication: " + e); + } catch (TimeoutException e) { + Assert.fail("unexpected TimeoutException during " + + "waitReplication: " + e); + } + fsIn = fs.open(TEST_PATH); + byte original[] = new byte[BlockReaderLocalTest.TEST_LENGTH]; + IOUtils.readFully(fsIn, original, 0, + BlockReaderLocalTest.TEST_LENGTH); + fsIn.close(); + fsIn = fs.open(TEST_PATH); + zcursor = fsIn.createZeroCopyCursor(); + zcursor.setFallbackBuffer(ByteBuffer. + allocateDirect(1024 * 1024 * 4)); + zcursor.setAllowShortReads(true); + HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn; + zcursor.read(8192); + ByteBuffer result = zcursor.getData(); + Assert.assertEquals(4096, result.remaining()); + Assert.assertEquals(4096, + dfsIn.getReadStatistics().getTotalBytesRead()); + Assert.assertEquals(4096, + dfsIn.getReadStatistics().getTotalZeroCopyBytesRead()); + Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 4096), + byteBufferToArray(result)); + zcursor.read(4097); + result = zcursor.getData(); + Assert.assertEquals(4096, result.remaining()); + Assert.assertArrayEquals(Arrays.copyOfRange(original, 4096, 8192), + byteBufferToArray(result)); + zcursor.setAllowShortReads(false); + zcursor.read(4100); + result = zcursor.getData(); + Assert.assertEquals(4100, result.remaining()); + + Assert.assertArrayEquals(Arrays.copyOfRange(original, 8192, 12292), + byteBufferToArray(result)); + } finally { + if (zcursor != null) zcursor.close(); + if (fsIn != null) fsIn.close(); + if (fs != null) fs.close(); + if (cluster != null) cluster.shutdown(); + } + } + + @Test + public void testZeroCopyReadsNoBackingBuffer() throws Exception { + HdfsConfiguration conf = initZeroCopyTest(); + MiniDFSCluster cluster = null; + final Path TEST_PATH = new Path("/a"); + FSDataInputStream fsIn = null; + ZeroCopyCursor zcursor = null; + + FileSystem fs = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + DFSTestUtil.createFile(fs, TEST_PATH, + BlockReaderLocalTest.TEST_LENGTH, (short)1, 7567L); + try { + DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1); + } catch (InterruptedException e) { + Assert.fail("unexpected InterruptedException during " + + "waitReplication: " 
+ e); + } catch (TimeoutException e) { + Assert.fail("unexpected TimeoutException during " + + "waitReplication: " + e); + } + fsIn = fs.open(TEST_PATH); + byte original[] = new byte[BlockReaderLocalTest.TEST_LENGTH]; + IOUtils.readFully(fsIn, original, 0, + BlockReaderLocalTest.TEST_LENGTH); + fsIn.close(); + fsIn = fs.open(TEST_PATH); + zcursor = fsIn.createZeroCopyCursor(); + zcursor.setAllowShortReads(false); + HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn; + // This read is longer than the file, and we do not have short reads enabled. + try { + zcursor.read(8192); + Assert.fail("expected UnsupportedOperationException"); + } catch (UnsupportedOperationException e) { + // expected + } + // This read is longer than the block, and we do not have short reads enabled. + try { + zcursor.read(4097); + Assert.fail("expected UnsupportedOperationException"); + } catch (UnsupportedOperationException e) { + // expected + } + // This read should succeed. + zcursor.read(4096); + ByteBuffer result = zcursor.getData(); + Assert.assertEquals(4096, result.remaining()); + Assert.assertEquals(4096, + dfsIn.getReadStatistics().getTotalBytesRead()); + Assert.assertEquals(4096, + dfsIn.getReadStatistics().getTotalZeroCopyBytesRead()); + Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 4096), + byteBufferToArray(result)); + } finally { + if (zcursor != null) zcursor.close(); + if (fsIn != null) fsIn.close(); + if (fs != null) fs.close(); + if (cluster != null) cluster.shutdown(); + } + } + + private static class CountingVisitor + implements ClientMmapManager.ClientMmapVisitor { + int count = 0; + + @Override + public void accept(ClientMmap mmap) { + count++; + } + + public void reset() { + count = 0; + } + } + + @Test + public void testZeroCopyMmapCache() throws Exception { + HdfsConfiguration conf = initZeroCopyTest(); + MiniDFSCluster cluster = null; + final Path TEST_PATH = new Path("/a"); + final int TEST_FILE_LENGTH = 16385; + final int RANDOM_SEED = 23453; + FSDataInputStream fsIn = null; + ZeroCopyCursor zcursor[] = { null, null, null, null, null }; + + DistributedFileSystem fs = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + DFSTestUtil.createFile(fs, TEST_PATH, + TEST_FILE_LENGTH, (short)1, RANDOM_SEED); + try { + DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1); + } catch (InterruptedException e) { + Assert.fail("unexpected InterruptedException during " + + "waitReplication: " + e); + } catch (TimeoutException e) { + Assert.fail("unexpected TimeoutException during " + + "waitReplication: " + e); + } + fsIn = fs.open(TEST_PATH); + byte original[] = new byte[TEST_FILE_LENGTH]; + IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH); + fsIn.close(); + fsIn = fs.open(TEST_PATH); + for (int i = 0; i < zcursor.length; i++) { + zcursor[i] = fsIn.createZeroCopyCursor(); + zcursor[i].setAllowShortReads(false); + } + ClientMmapManager mmapManager = fs.getClient().getMmapManager(); + CountingVisitor countingVisitor = new CountingVisitor(); + mmapManager.visitMmaps(countingVisitor); + Assert.assertEquals(0, countingVisitor.count); + mmapManager.visitEvictable(countingVisitor); + Assert.assertEquals(0, countingVisitor.count); + zcursor[0].read(4096); + fsIn.seek(0); + zcursor[1].read(4096); + mmapManager.visitMmaps(countingVisitor); + Assert.assertEquals(1, countingVisitor.count); + countingVisitor.reset(); + mmapManager.visitEvictable(countingVisitor); + Assert.assertEquals(0, 
countingVisitor.count); + countingVisitor.reset(); + + // The mmaps should be of the first block of the file. + final ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fs, TEST_PATH); + mmapManager.visitMmaps(new ClientMmapManager.ClientMmapVisitor() { + @Override + public void accept(ClientMmap mmap) { + Assert.assertEquals(firstBlock, mmap.getBlock()); + } + }); + + // Read more blocks. + zcursor[2].read(4096); + zcursor[3].read(4096); + try { + zcursor[4].read(4096); + Assert.fail("expected UnsupportedOperationException"); + } catch (UnsupportedOperationException e) { + // expected + } + + // we should have 3 mmaps, 0 evictable + mmapManager.visitMmaps(countingVisitor); + Assert.assertEquals(3, countingVisitor.count); + countingVisitor.reset(); + mmapManager.visitEvictable(countingVisitor); + Assert.assertEquals(0, countingVisitor.count); + + // After we close the cursors, the mmaps should be evictable for + // a brief period of time. Then, they should be closed (we're + // using a very quick timeout) + for (int i = 0; i < zcursor.length; i++) { + IOUtils.closeStream(zcursor[i]); + } + while (true) { + countingVisitor.reset(); + mmapManager.visitEvictable(countingVisitor); + if (0 == countingVisitor.count) { + break; + } + } + countingVisitor.reset(); + mmapManager.visitMmaps(countingVisitor); + Assert.assertEquals(0, countingVisitor.count); + } finally { + if (fsIn != null) fsIn.close(); + if (fs != null) fs.close(); + if (cluster != null) cluster.shutdown(); + } + + } } From a99edd1f403486f084f719a0647a9b7204bbc439 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Tue, 20 Aug 2013 18:11:47 +0000 Subject: [PATCH 05/51] Add CHANGES.txt for HDFS-4953 git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1515907 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 5f2fb795a39..24a6abd92c7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -120,6 +120,9 @@ Trunk (Unreleased) HDFS-5004. Add additional JMX bean for NameNode status data (Trevor Lorimer via cos) + HDFS-4953. Enable HDFS local reads via mmap. + (Colin Patrick McCabe via wang). + OPTIMIZATIONS BUG FIXES From 920b4cc06f1bc15809902bdd1968cc434a694a08 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Thu, 22 Aug 2013 23:37:51 +0000 Subject: [PATCH 06/51] HDFS-5052. Add cacheRequest/uncacheRequest support to NameNode. (Contributed by Colin Patrick McCabe.) 
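
At a glance, the client-facing surface added here is a batch add, a batch remove, and a paged listing on ClientProtocol. A minimal usage sketch (the pool name, the path, and the ClientProtocol handle are placeholders; the other names come from the patch below):

  import java.util.Arrays;
  import java.util.List;
  import org.apache.hadoop.fs.RemoteIterator;
  import org.apache.hadoop.hdfs.protocol.ClientProtocol;
  import org.apache.hadoop.hdfs.protocol.PathCacheDirective;
  import org.apache.hadoop.hdfs.protocol.PathCacheEntry;
  import org.apache.hadoop.util.Fallible;

  public class PathCacheExample {
    static void demo(ClientProtocol namenode) throws Exception {
      // Ask the NameNode to cache one path in pool "pool1".
      List<Fallible<PathCacheEntry>> added = namenode.addPathCacheDirectives(
          Arrays.asList(new PathCacheDirective("/warm/part-00000", "pool1")));
      long id = added.get(0).get().getEntryId();  // get() throws if this element failed

      // Page through what is cached in "pool1", 64 entries per request.
      RemoteIterator<PathCacheEntry> it =
          namenode.listPathCacheEntries(0, "pool1", 64);
      while (it.hasNext()) {
        System.out.println(it.next());
      }

      // Drop the entry again; removal is also reported per element.
      namenode.removePathCacheEntries(Arrays.asList(id)).get(0).get();
    }
  }
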
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1516669 13f79535-47bb-0310-9956-ffa450edef68 --- .../java/org/apache/hadoop/util/Fallible.java | 53 +++++ .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 + .../AddPathCacheDirectiveException.java | 78 +++++++ .../hadoop/hdfs/protocol/ClientProtocol.java | 51 +++++ .../hdfs/protocol/PathCacheDirective.java | 110 ++++++++++ .../hadoop/hdfs/protocol/PathCacheEntry.java | 75 +++++++ .../RemovePathCacheEntryException.java | 68 +++++++ ...amenodeProtocolServerSideTranslatorPB.java | 113 ++++++++++- .../ClientNamenodeProtocolTranslatorPB.java | 191 ++++++++++++++++++ .../hdfs/server/namenode/CacheManager.java | 165 +++++++++++++++ .../hdfs/server/namenode/FSNamesystem.java | 24 ++- .../server/namenode/NameNodeRpcServer.java | 82 ++++++++ .../main/proto/ClientNamenodeProtocol.proto | 57 ++++++ .../namenode/TestPathCacheRequests.java | 150 ++++++++++++++ 14 files changed, 1218 insertions(+), 2 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Fallible.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathCacheDirectiveException.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheDirective.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheEntry.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathCacheEntryException.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathCacheRequests.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Fallible.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Fallible.java new file mode 100644 index 00000000000..fe343d9eeaf --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Fallible.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.util; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Contains either a value of type T, or an IOException. + * + * This can be useful as a return value for batch APIs that need granular + * error reporting. 
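
Continuing the sketch above: because each result is wrapped in a Fallible, a caller unwraps elements one by one, so a single bad directive does not fail the whole batch (fragment only; namenode and directives are placeholders as before):

  List<Fallible<PathCacheEntry>> results =
      namenode.addPathCacheDirectives(directives);
  for (int i = 0; i < results.size(); i++) {
    try {
      // get() returns the value, or throws the IOException recorded
      // for this element.
      PathCacheEntry entry = results.get(i).get();
      System.out.println("added entry " + entry.getEntryId());
    } catch (IOException e) {
      System.err.println("directive " + directives.get(i) + " failed: " + e);
    }
  }
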
+ */ +@InterfaceAudience.LimitedPrivate({"HDFS"}) +@InterfaceStability.Unstable +public class Fallible { + private final T val; + private final IOException ioe; + + public Fallible(T val) { + this.val = val; + this.ioe = null; + } + + public Fallible(IOException ioe) { + this.val = null; + this.ioe = ioe; + } + + public T get() throws IOException { + if (ioe != null) { + throw new IOException(ioe); + } + return this.val; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index d12d273abea..0e1805ee3ed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -12,6 +12,9 @@ HDFS-4949 (Unreleased) HDFS-5051. Propagate cache status information from the DataNode to the NameNode (Andrew Wang via Colin Patrick McCabe) + HDFS-5052. Add cacheRequest/uncacheRequest support to NameNode. + (contributed by Colin Patrick McCabe) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathCacheDirectiveException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathCacheDirectiveException.java new file mode 100644 index 00000000000..3e0531c20c8 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathCacheDirectiveException.java @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + +import java.io.IOException; + +/** + * An exception which occurred when trying to add a path cache directive. 
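
When a caller needs to know why an individual directive was rejected, note that Fallible#get() wraps the recorded error in a new IOException, so the specific failure is found in the cause; a small sketch (hypothetical catch site, continuing the earlier fragment):

  try {
    results.get(i).get();
  } catch (IOException e) {
    // The original error is the cause; its concrete type (EmptyPathError,
    // InvalidPathNameError, InvalidPoolNameError, ...) says why the
    // directive was rejected, and getDirective() says which one it was.
    if (e.getCause() instanceof AddPathCacheDirectiveException) {
      AddPathCacheDirectiveException ae =
          (AddPathCacheDirectiveException) e.getCause();
      System.err.println("could not cache " + ae.getDirective());
    }
  }
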
+ */ +public abstract class AddPathCacheDirectiveException extends IOException { + private static final long serialVersionUID = 1L; + + private final PathCacheDirective directive; + + public AddPathCacheDirectiveException(String description, + PathCacheDirective directive) { + super(description); + this.directive = directive; + } + + public PathCacheDirective getDirective() { + return directive; + } + + public static final class EmptyPathError + extends AddPathCacheDirectiveException { + private static final long serialVersionUID = 1L; + + public EmptyPathError(PathCacheDirective directive) { + super("empty path in directive " + directive, directive); + } + } + + public static class InvalidPathNameError + extends AddPathCacheDirectiveException { + private static final long serialVersionUID = 1L; + + public InvalidPathNameError(PathCacheDirective directive) { + super("can't handle non-absolute path name " + directive.getPath(), + directive); + } + } + + public static class InvalidPoolNameError + extends AddPathCacheDirectiveException { + private static final long serialVersionUID = 1L; + + public InvalidPoolNameError(PathCacheDirective directive) { + super("invalid pool name '" + directive.getPool() + "'", directive); + } + } + + public static class UnexpectedAddPathCacheDirectiveException + extends AddPathCacheDirectiveException { + private static final long serialVersionUID = 1L; + + public UnexpectedAddPathCacheDirectiveException( + PathCacheDirective directive) { + super("encountered an unexpected error when trying to " + + "add path cache directive " + directive, directive); + } + } +}; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 5789c3615eb..165d0673f47 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -19,6 +19,7 @@ import java.io.FileNotFoundException; import java.io.IOException; +import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -30,6 +31,7 @@ import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.ParentNotDirectoryException; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -46,6 +48,7 @@ import org.apache.hadoop.security.KerberosInfo; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenInfo; +import org.apache.hadoop.util.Fallible; /********************************************************************** * ClientProtocol is used by user code via @@ -1093,5 +1096,53 @@ public void disallowSnapshot(String snapshotRoot) @Idempotent public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, String fromSnapshot, String toSnapshot) throws IOException; + + /** + * Add some path cache directives to the CacheManager. + * + * @param directives + * A list of all the path cache directives we want to add. + * @return + * An list where each element is either a path cache entry that was + * added, or an IOException exception describing why the directive + * could not be added. 
+ */ + @AtMostOnce + public List> + addPathCacheDirectives(List directives) + throws IOException; + + /** + * Remove some path cache entries from the CacheManager. + * + * @param ids + * A list of all the IDs we want to remove from the CacheManager. + * @return + * An list where each element is either an ID that was removed, + * or an IOException exception describing why the ID could not be + * removed. + */ + @AtMostOnce + public List> removePathCacheEntries(List ids) + throws IOException; + + /** + * List cached paths on the server. + * + * @param prevId + * The previous ID that we listed, or 0 if this is the first call + * to listPathCacheEntries. + * @param pool + * The pool ID to list. If this is the empty string, all pool ids + * will be listed. + * @param maxRepliesPerRequest + * The maximum number of replies to make in each request. + * @return + * A RemoteIterator from which you can get PathCacheEntry objects. + * Requests will be made as needed. + */ + @Idempotent + public RemoteIterator listPathCacheEntries(long prevId, + String pool, int maxRepliesPerRequest) throws IOException; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheDirective.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheDirective.java new file mode 100644 index 00000000000..8045186a6c2 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheDirective.java @@ -0,0 +1,110 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + +import java.io.IOException; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ComparisonChain; + +import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError; + +/** + * A directive to add a path to a cache pool. + */ +public class PathCacheDirective implements Comparable { + private final String path; + + private final String pool; + + public PathCacheDirective(String path, String pool) throws IOException { + Preconditions.checkNotNull(path); + Preconditions.checkNotNull(pool); + this.path = path; + this.pool = pool; + } + + /** + * @return The path used in this request. + */ + public String getPath() { + return path; + } + + /** + * @return The pool used in this request. + */ + public String getPool() { + return pool; + } + + /** + * Check if this PathCacheDirective is valid. 
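
The intent is to reject empty paths, malformed or non-absolute path names, and empty pool names. Since DFSUtil.isValidName() returns true for well-formed absolute paths, the path check has to be negated for InvalidPathNameError to fire on bad input; a sketch of the intended checks:

  public void validate() throws IOException {
    if (path.isEmpty()) {
      throw new EmptyPathError(this);
    }
    if (!DFSUtil.isValidName(path)) {   // reject non-absolute or malformed names
      throw new InvalidPathNameError(this);
    }
    if (pool.isEmpty()) {
      throw new InvalidPoolNameError(this);
    }
  }
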
+ * + * @throws IOException + * If this PathCacheDirective is not valid. + */ + public void validate() throws IOException { + if (path.isEmpty()) { + throw new EmptyPathError(this); + } + if (DFSUtil.isValidName(path)) { + throw new InvalidPathNameError(this); + } + + if (pool.isEmpty()) { + throw new InvalidPoolNameError(this); + } + } + + @Override + public int compareTo(PathCacheDirective rhs) { + return ComparisonChain.start(). + compare(pool, rhs.getPool()). + compare(path, rhs.getPath()). + result(); + } + + @Override + public int hashCode() { + return new HashCodeBuilder().append(path).append(pool).hashCode(); + } + + @Override + public boolean equals(Object o) { + try { + PathCacheDirective other = (PathCacheDirective)o; + return other.compareTo(this) == 0; + } catch (ClassCastException e) { + return false; + } + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("{ path:").append(path). + append(", pool:").append(pool). + append(" }"); + return builder.toString(); + } +}; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheEntry.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheEntry.java new file mode 100644 index 00000000000..62b8b0968b5 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheEntry.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + +import org.apache.commons.lang.builder.EqualsBuilder; +import org.apache.commons.lang.builder.HashCodeBuilder; + +import com.google.common.base.Preconditions; + +/** + * An entry in the NameNode's path cache. + */ +public final class PathCacheEntry { + private final long entryId; + private final PathCacheDirective directive; + + public PathCacheEntry(long entryId, PathCacheDirective directive) { + Preconditions.checkArgument(entryId > 0); + this.entryId = entryId; + this.directive = directive; + } + + public long getEntryId() { + return entryId; + } + + public PathCacheDirective getDirective() { + return directive; + } + + @Override + public boolean equals(Object o) { + try { + PathCacheEntry other = (PathCacheEntry)o; + return new EqualsBuilder(). + append(this.entryId, other.entryId). + append(this.directive, other.directive). + isEquals(); + } catch (ClassCastException e) { + return false; + } + } + + @Override + public int hashCode() { + return new HashCodeBuilder(). + append(entryId). + append(directive). + hashCode(); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("{ entryId:").append(entryId). + append(", directive:").append(directive.toString()). 
+ append(" }"); + return builder.toString(); + } +}; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathCacheEntryException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathCacheEntryException.java new file mode 100644 index 00000000000..41f7269cdd1 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathCacheEntryException.java @@ -0,0 +1,68 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + +import java.io.IOException; + +import com.google.common.base.Preconditions; + +/** + * An exception which occurred when trying to remove a path cache entry. + */ +public abstract class RemovePathCacheEntryException extends IOException { + private static final long serialVersionUID = 1L; + + private final long entryId; + + public RemovePathCacheEntryException(String description, long entryId) { + super(description); + this.entryId = entryId; + } + + public long getEntryId() { + return this.entryId; + } + + public final static class InvalidIdException + extends RemovePathCacheEntryException { + private static final long serialVersionUID = 1L; + + public InvalidIdException(long entryId) { + super("invalid cache path entry id " + entryId, entryId); + } + } + + public final static class NoSuchIdException + extends RemovePathCacheEntryException { + private static final long serialVersionUID = 1L; + + public NoSuchIdException(long entryId) { + super("there is no path cache entry with id " + entryId, entryId); + } + } + + public final static class UnexpectedRemovePathCacheEntryException + extends RemovePathCacheEntryException { + private static final long serialVersionUID = 1L; + + public UnexpectedRemovePathCacheEntryException(long id) { + super("encountered an unexpected error when trying to " + + "remove path cache entry id " + id, id); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index d7a18a60ac8..c02bcecbe61 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.protocolPB; import java.io.IOException; +import java.util.ArrayList; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; @@ -25,6 +26,14 @@ import 
org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.Options.Rename; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.hdfs.protocol.PathCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathCacheEntry; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DirectoryListing; @@ -37,6 +46,10 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectiveErrorProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectivesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectivesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto; @@ -92,6 +105,9 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesElementProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto; @@ -102,6 +118,9 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntryErrorProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesRequestProto; +import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto; @@ -142,6 +161,7 @@ import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.namenode.INodeId; +import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto; @@ -150,6 +170,7 @@ import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.Fallible; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; @@ -1003,5 +1024,95 @@ public IsFileClosedResponseProto isFileClosed( throw new ServiceException(e); } } - + + @Override + public AddPathCacheDirectivesResponseProto addPathCacheDirectives(RpcController controller, + AddPathCacheDirectivesRequestProto request) throws ServiceException { + try { + ArrayList input = + new ArrayList(request.getElementsCount()); + for (int i = 0; i < request.getElementsCount(); i++) { + PathCacheDirectiveProto proto = request.getElements(i); + input.add(new PathCacheDirective(proto.getPath(), proto.getPool())); + } + List> output = server.addPathCacheDirectives(input); + AddPathCacheDirectivesResponseProto.Builder builder = + AddPathCacheDirectivesResponseProto.newBuilder(); + for (int idx = 0; idx < output.size(); idx++) { + try { + PathCacheEntry entry = output.get(idx).get(); + builder.addResults(entry.getEntryId()); + } catch (EmptyPathError ioe) { + builder.addResults(AddPathCacheDirectiveErrorProto. + EMPTY_PATH_ERROR_VALUE); + } catch (InvalidPathNameError ioe) { + builder.addResults(AddPathCacheDirectiveErrorProto. + INVALID_PATH_NAME_ERROR_VALUE); + } catch (InvalidPoolNameError ioe) { + builder.addResults(AddPathCacheDirectiveErrorProto. + INVALID_POOL_NAME_ERROR_VALUE); + } catch (IOException ioe) { + builder.addResults(AddPathCacheDirectiveErrorProto. + UNEXPECTED_ADD_ERROR_VALUE); + } + } + return builder.build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public RemovePathCacheEntriesResponseProto removePathCacheEntries( + RpcController controller, RemovePathCacheEntriesRequestProto request) + throws ServiceException { + try { + List> output = + server.removePathCacheEntries(request.getElementsList()); + RemovePathCacheEntriesResponseProto.Builder builder = + RemovePathCacheEntriesResponseProto.newBuilder(); + for (int idx = 0; idx < output.size(); idx++) { + try { + long id = output.get(idx).get(); + builder.addResults(id); + } catch (InvalidIdException ioe) { + builder.addResults(RemovePathCacheEntryErrorProto. + INVALID_CACHED_PATH_ID_ERROR_VALUE); + } catch (NoSuchIdException ioe) { + builder.addResults(RemovePathCacheEntryErrorProto. 
+ NO_SUCH_CACHED_PATH_ID_ERROR_VALUE); + } catch (IOException ioe) { + builder.addResults(RemovePathCacheEntryErrorProto. + UNEXPECTED_REMOVE_ERROR_VALUE); + } + } + return builder.build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public ListPathCacheEntriesResponseProto listPathCacheEntries(RpcController controller, + ListPathCacheEntriesRequestProto request) throws ServiceException { + try { + RemoteIterator iter = + server.listPathCacheEntries(request.getPrevId(), + request.getPool(), + request.getMaxReplies()); + ListPathCacheEntriesResponseProto.Builder builder = + ListPathCacheEntriesResponseProto.newBuilder(); + while (iter.hasNext()) { + PathCacheEntry entry = iter.next(); + builder.addElements( + ListPathCacheEntriesElementProto.newBuilder(). + setId(entry.getEntryId()). + setPath(entry.getDirective().getPath()). + setPool(entry.getDirective().getPool())); + } + return builder.build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index b5be61a3bcd..eb9845e849b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -20,7 +20,10 @@ import java.io.Closeable; import java.io.FileNotFoundException; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; +import java.util.List; +import java.util.NoSuchElementException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -30,9 +33,19 @@ import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.ParentNotDirectoryException; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; +import org.apache.hadoop.hdfs.protocol.PathCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathCacheEntry; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.UnexpectedAddPathCacheDirectiveException; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; @@ -50,6 +63,10 @@ import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto; import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectiveErrorProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectivesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectivesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto; @@ -87,11 +104,18 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesElementProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntryErrorProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto; @@ -127,6 +151,7 @@ import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto; import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.Fallible; import com.google.protobuf.ByteString; import com.google.protobuf.ServiceException; @@ -982,4 +1007,170 @@ public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, throw ProtobufHelper.getRemoteException(e); } } + + private static IOException addPathCacheDirectivesError(long code, + PathCacheDirective directive) { + if (code == AddPathCacheDirectiveErrorProto.EMPTY_PATH_ERROR_VALUE) { + return new EmptyPathError(directive); + } else if (code == AddPathCacheDirectiveErrorProto. 
+ INVALID_PATH_NAME_ERROR_VALUE) { + return new InvalidPathNameError(directive); + } else if (code == AddPathCacheDirectiveErrorProto. + INVALID_POOL_NAME_ERROR_VALUE) { + return new InvalidPoolNameError(directive); + } else { + return new UnexpectedAddPathCacheDirectiveException(directive); + } + } + + @Override + public List> addPathCacheDirectives( + List directives) throws IOException { + try { + AddPathCacheDirectivesRequestProto.Builder builder = + AddPathCacheDirectivesRequestProto.newBuilder(); + for (PathCacheDirective directive : directives) { + builder.addElements(PathCacheDirectiveProto.newBuilder(). + setPath(directive.getPath()). + setPool(directive.getPool()). + build()); + } + AddPathCacheDirectivesResponseProto result = + rpcProxy.addPathCacheDirectives(null, builder.build()); + int resultsCount = result.getResultsCount(); + ArrayList> results = + new ArrayList>(resultsCount); + for (int i = 0; i < resultsCount; i++) { + PathCacheDirective directive = directives.get(i); + long code = result.getResults(i); + if (code > 0) { + results.add(new Fallible( + new PathCacheEntry(code, directive))); + } else { + results.add(new Fallible( + addPathCacheDirectivesError(code, directive))); + } + } + return results; + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + private static IOException removePathCacheEntriesError(long code, long id) { + if (code == RemovePathCacheEntryErrorProto. + INVALID_CACHED_PATH_ID_ERROR_VALUE) { + return new InvalidIdException(id); + } else if (code == RemovePathCacheEntryErrorProto. + NO_SUCH_CACHED_PATH_ID_ERROR_VALUE) { + return new NoSuchIdException(id); + } else { + return new UnexpectedRemovePathCacheEntryException(id); + } + } + + @Override + public List> removePathCacheEntries(List ids) + throws IOException { + try { + RemovePathCacheEntriesRequestProto.Builder builder = + RemovePathCacheEntriesRequestProto.newBuilder(); + for (Long id : ids) { + builder.addElements(id); + } + RemovePathCacheEntriesResponseProto result = + rpcProxy.removePathCacheEntries(null, builder.build()); + int resultsCount = result.getResultsCount(); + ArrayList> results = + new ArrayList>(resultsCount); + for (int i = 0; i < resultsCount; i++) { + long code = result.getResults(i); + if (code > 0) { + results.add(new Fallible(code)); + } else { + results.add(new Fallible( + removePathCacheEntriesError(code, ids.get(i)))); + } + } + return results; + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + private class PathCacheEntriesIterator + implements RemoteIterator { + private long prevId; + private final String pool; + private final int repliesPerRequest; + private ListPathCacheEntriesResponseProto response; + private int idx; + + public PathCacheEntriesIterator(long prevId, String pool, + int repliesPerRequest) { + this.prevId = prevId; + this.pool = pool; + this.repliesPerRequest = repliesPerRequest; + this.response = null; + this.idx = -1; + } + + private void makeRequest() throws IOException { + idx = 0; + response = null; + try { + ListPathCacheEntriesRequestProto req = + ListPathCacheEntriesRequestProto.newBuilder(). + setPrevId(prevId). + setPool(pool). + setMaxReplies(repliesPerRequest). 
+ build(); + response = rpcProxy.listPathCacheEntries(null, req); + if (response.getElementsCount() == 0) { + response = null; + } + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + private void makeRequestIfNeeded() throws IOException { + if (idx == -1) { + makeRequest(); + } else if ((response != null) && (idx >= response.getElementsCount())) { + if (response.getHasMore()) { + makeRequest(); + } else { + response = null; + } + } + } + + @Override + public boolean hasNext() throws IOException { + makeRequestIfNeeded(); + return (response != null); + } + + @Override + public PathCacheEntry next() throws IOException { + makeRequestIfNeeded(); + if (response == null) { + throw new NoSuchElementException(); + } + ListPathCacheEntriesElementProto elementProto = + response.getElements(idx); + prevId = elementProto.getId(); + idx++; + return new PathCacheEntry(elementProto.getId(), + new PathCacheDirective(elementProto.getPath(), + elementProto.getPool())); + } + } + + @Override + public RemoteIterator listPathCacheEntries(long prevId, + String pool, int repliesPerRequest) throws IOException { + return new PathCacheEntriesIterator(prevId, pool, repliesPerRequest); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java new file mode 100644 index 00000000000..8be575a9701 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -0,0 +1,165 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.Map.Entry; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.protocol.PathCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathCacheEntry; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.UnexpectedAddPathCacheDirectiveException; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException; +import org.apache.hadoop.util.Fallible; + +/** + * The Cache Manager handles caching on DataNodes. 
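
Two indexes are maintained below: entriesById, a TreeMap that keeps entries in ID order for resumable listing, and entriesByDirective, which deduplicates repeated add requests. Listing is paged by entry ID, so a caller simply resumes from the last ID it received; a minimal caller-side sketch (cacheManager is a placeholder handle; 64 is an arbitrary page size):

  long prevId = 0;
  List<PathCacheEntry> batch;
  do {
    // An empty pool string matches entries in every pool.
    batch = cacheManager.listPathCacheEntries(prevId, "", 64);
    for (PathCacheEntry entry : batch) {
      prevId = entry.getEntryId();   // resume point for the next page
    }
  } while (!batch.isEmpty());
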
+ */ +final class CacheManager { + public static final Log LOG = LogFactory.getLog(CacheManager.class); + + /** + * Cache entries, sorted by ID. + * + * listPathCacheEntries relies on the ordering of elements in this map + * to track what has already been listed by the client. + */ + private final TreeMap entriesById = + new TreeMap(); + + /** + * Cache entries, sorted by directive. + */ + private final TreeMap entriesByDirective = + new TreeMap(); + + /** + * The entry ID to use for a new entry. + */ + private long nextEntryId; + + CacheManager(FSDirectory dir, Configuration conf) { + // TODO: support loading and storing of the CacheManager state + clear(); + } + + synchronized void clear() { + entriesById.clear(); + entriesByDirective.clear(); + nextEntryId = 1; + } + + synchronized long getNextEntryId() throws IOException { + if (nextEntryId == Long.MAX_VALUE) { + throw new IOException("no more available IDs"); + } + return nextEntryId++; + } + + private synchronized Fallible addDirective( + PathCacheDirective directive) { + try { + directive.validate(); + } catch (IOException ioe) { + return new Fallible(ioe); + } + // Check if we already have this entry. + PathCacheEntry existing = entriesByDirective.get(directive); + if (existing != null) { + // Entry already exists: return existing entry. + return new Fallible(existing); + } + // Add a new entry with the next available ID. + PathCacheEntry entry; + try { + entry = new PathCacheEntry(getNextEntryId(), directive); + } catch (IOException ioe) { + return new Fallible( + new UnexpectedAddPathCacheDirectiveException(directive)); + } + entriesByDirective.put(directive, entry); + entriesById.put(entry.getEntryId(), entry); + return new Fallible(entry); + } + + public synchronized List> addDirectives( + List directives) { + ArrayList> results = + new ArrayList>(directives.size()); + for (PathCacheDirective directive: directives) { + results.add(addDirective(directive)); + } + return results; + } + + private synchronized Fallible removeEntry(long entryId) { + // Check for invalid IDs. + if (entryId <= 0) { + return new Fallible(new InvalidIdException(entryId)); + } + // Find the entry. + PathCacheEntry existing = entriesById.get(entryId); + if (existing == null) { + return new Fallible(new NoSuchIdException(entryId)); + } + // Remove the corresponding entry in entriesByDirective. + if (entriesByDirective.remove(existing.getDirective()) == null) { + return new Fallible( + new UnexpectedRemovePathCacheEntryException(entryId)); + } + entriesById.remove(entryId); + return new Fallible(entryId); + } + + public synchronized List> removeEntries(List entryIds) { + ArrayList> results = + new ArrayList>(entryIds.size()); + for (Long entryId : entryIds) { + results.add(removeEntry(entryId)); + } + return results; + } + + public synchronized List listPathCacheEntries(long prevId, + String pool, int maxReplies) { + final int MAX_PRE_ALLOCATED_ENTRIES = 16; + ArrayList replies = + new ArrayList(Math.min(MAX_PRE_ALLOCATED_ENTRIES, maxReplies)); + int numReplies = 0; + SortedMap tailMap = entriesById.tailMap(prevId + 1); + for (Entry cur : tailMap.entrySet()) { + if (numReplies >= maxReplies) { + return replies; + } + if (pool.isEmpty() || cur.getValue().getDirective(). 
+ getPool().equals(pool)) { + replies.add(cur.getValue()); + numReplies++; + } + } + return replies; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 989f688a0fd..b93d75c1256 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -129,6 +129,7 @@ import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; @@ -141,6 +142,8 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.PathCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathCacheEntry; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -223,6 +226,7 @@ import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.Fallible; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.VersionInfo; @@ -360,6 +364,7 @@ private void logAuditEvent(boolean succeeded, FSDirectory dir; private final BlockManager blockManager; private final SnapshotManager snapshotManager; + private final CacheManager cacheManager; private final DatanodeStatistics datanodeStatistics; // Block pool ID used by this namenode @@ -687,6 +692,7 @@ public static FSNamesystem loadFromDisk(Configuration conf) this.dtSecretManager = createDelegationTokenSecretManager(conf); this.dir = new FSDirectory(fsImage, this, conf); this.snapshotManager = new SnapshotManager(dir); + this.cacheManager= new CacheManager(dir, conf); this.safeMode = new SafeModeInfo(conf); this.auditLoggers = initAuditLoggers(conf); this.isDefaultAuditLogger = auditLoggers.size() == 1 && @@ -6741,6 +6747,20 @@ void removeSnapshottableDirs(List toRemove) { } } + List> addPathCacheDirectives( + List directives) { + return cacheManager.addDirectives(directives); + } + + List> removePathCacheEntries(List ids) { + return cacheManager.removeEntries(ids); + } + + List listPathCacheEntries(long startId, String pool, + int maxReplies) { + return cacheManager.listPathCacheEntries(startId, pool, maxReplies); + } + /** * Default AuditLogger implementation; used when no access logger is * defined in the config file. 
It can also be explicitly listed in the @@ -6777,7 +6797,9 @@ public void logAuditEvent(boolean succeeded, String userName, auditLog.info(sb); } } - } + public CacheManager getCacheManager() { + return cacheManager; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 3fd76a695bf..875f81642e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -31,6 +31,7 @@ import java.util.Collection; import java.util.HashMap; import java.util.List; +import java.util.NoSuchElementException; import org.apache.commons.logging.Log; import org.apache.hadoop.HadoopIllegalArgumentException; @@ -44,6 +45,7 @@ import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.ParentNotDirectoryException; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; @@ -58,6 +60,8 @@ import org.apache.hadoop.hdfs.HDFSPolicyProvider; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; +import org.apache.hadoop.hdfs.protocol.PathCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathCacheEntry; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -133,6 +137,7 @@ import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetUserMappingsProtocolService; import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolPB; import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolServerSideTranslatorPB; +import org.apache.hadoop.util.Fallible; import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.util.VersionUtil; @@ -1200,4 +1205,81 @@ public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, metrics.incrSnapshotDiffReportOps(); return report; } + + @Override + public List> addPathCacheDirectives( + List paths) throws IOException { + return namesystem.addPathCacheDirectives(paths); + } + + @Override + public List> removePathCacheEntries(List ids) + throws IOException { + return namesystem.removePathCacheEntries(ids); + } + + private class PathCacheEntriesIterator + implements RemoteIterator { + private long prevId; + private final String pool; + private final int repliesPerRequest; + private List entries; + private int idx; + + public PathCacheEntriesIterator(long prevId, String pool, + int repliesPerRequest) { + this.prevId = prevId; + this.pool = pool; + this.repliesPerRequest = repliesPerRequest; + this.entries = null; + this.idx = -1; + } + + private void makeRequest() throws IOException { + idx = 0; + entries = null; + entries = namesystem.listPathCacheEntries(prevId, pool, + repliesPerRequest); + if (entries.isEmpty()) { + entries = null; + } + } + + private void makeRequestIfNeeded() throws IOException { + if (idx == -1) { + makeRequest(); + } else if ((entries != null) && (idx >= entries.size())) { + if (entries.size() < repliesPerRequest) { + // Last time, we got fewer entries than requested. + // So we should be at the end. 
+ entries = null; + } else { + makeRequest(); + } + } + } + + @Override + public boolean hasNext() throws IOException { + makeRequestIfNeeded(); + return (entries != null); + } + + @Override + public PathCacheEntry next() throws IOException { + makeRequestIfNeeded(); + if (entries == null) { + throw new NoSuchElementException(); + } + PathCacheEntry entry = entries.get(idx++); + prevId = entry.getEntryId(); + return entry; + } + } + + @Override + public RemoteIterator listPathCacheEntries(long prevId, String pool, + int maxReplies) throws IOException { + return new PathCacheEntriesIterator(prevId, pool, maxReplies); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index 95fcc50ebd3..9d1bfd5a354 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -363,6 +363,57 @@ message IsFileClosedResponseProto { required bool result = 1; } +message PathCacheDirectiveProto { + required string path = 1; + required string pool = 2; +} + +message AddPathCacheDirectivesRequestProto { + repeated PathCacheDirectiveProto elements = 1; +} + +message AddPathCacheDirectivesResponseProto { + repeated int64 results = 1 [packed=true]; +} + +enum AddPathCacheDirectiveErrorProto { + EMPTY_PATH_ERROR = -1; + INVALID_PATH_NAME_ERROR = -2; + INVALID_POOL_NAME_ERROR = -3; + UNEXPECTED_ADD_ERROR = -4; +} + +message RemovePathCacheEntriesRequestProto { + repeated int64 elements = 1 [packed=true]; +} + +message RemovePathCacheEntriesResponseProto { + repeated int64 results = 1 [packed=true]; +} + +enum RemovePathCacheEntryErrorProto { + INVALID_CACHED_PATH_ID_ERROR = -1; + NO_SUCH_CACHED_PATH_ID_ERROR = -2; + UNEXPECTED_REMOVE_ERROR = -3; +} + +message ListPathCacheEntriesRequestProto { + required int64 prevId = 1; + required string pool = 2; + optional int32 maxReplies = 3; +} + +message ListPathCacheEntriesElementProto { + required int64 id = 1; + required string path = 2; + required string pool = 3; +} + +message ListPathCacheEntriesResponseProto { + repeated ListPathCacheEntriesElementProto elements = 1; + required bool hasMore = 2; +} + message GetFileLinkInfoRequestProto { required string src = 1; } @@ -544,6 +595,12 @@ service ClientNamenodeProtocol { returns(ListCorruptFileBlocksResponseProto); rpc metaSave(MetaSaveRequestProto) returns(MetaSaveResponseProto); rpc getFileInfo(GetFileInfoRequestProto) returns(GetFileInfoResponseProto); + rpc addPathCacheDirectives(AddPathCacheDirectivesRequestProto) + returns (AddPathCacheDirectivesResponseProto); + rpc removePathCacheEntries(RemovePathCacheEntriesRequestProto) + returns (RemovePathCacheEntriesResponseProto); + rpc listPathCacheEntries(ListPathCacheEntriesRequestProto) + returns (ListPathCacheEntriesResponseProto); rpc getFileLinkInfo(GetFileLinkInfoRequestProto) returns(GetFileLinkInfoResponseProto); rpc getContentSummary(GetContentSummaryRequestProto) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathCacheRequests.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathCacheRequests.java new file mode 100644 index 00000000000..fe7ae38d7b5 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathCacheRequests.java @@ -0,0 +1,150 @@ +/** + * Licensed to the Apache Software 
Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +import junit.framework.Assert; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; +import org.apache.hadoop.hdfs.protocol.PathCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathCacheEntry; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; +import org.apache.hadoop.util.Fallible; +import org.junit.Test; + +public class TestPathCacheRequests { + static final Log LOG = LogFactory.getLog(TestPathCacheRequests.class); + + private static void validateListAll( + RemoteIterator iter, + long id0, long id1, long id2) throws Exception { + Assert.assertEquals(new PathCacheEntry(id0, + new PathCacheDirective("/alpha", "pool1")), + iter.next()); + Assert.assertEquals(new PathCacheEntry(id1, + new PathCacheDirective("/beta", "pool2")), + iter.next()); + Assert.assertEquals(new PathCacheEntry(id2, + new PathCacheDirective("/gamma", "pool1")), + iter.next()); + Assert.assertFalse(iter.hasNext()); + } + + @Test + public void testSetAndGet() throws Exception { + Configuration conf = new HdfsConfiguration(); + MiniDFSCluster cluster = null; + + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + NamenodeProtocols proto = cluster.getNameNodeRpc(); + List> addResults1 = + proto.addPathCacheDirectives(Arrays.asList( + new PathCacheDirective[] { + new PathCacheDirective("/alpha", "pool1"), + new PathCacheDirective("/beta", "pool2"), + new PathCacheDirective("", "pool3") + })); + long ids1[] = new long[2]; + ids1[0] = addResults1.get(0).get().getEntryId(); + ids1[1] = addResults1.get(1).get().getEntryId(); + try { + addResults1.get(2).get(); + Assert.fail("expected an error when adding an empty path"); + } catch (IOException ioe) { + Assert.assertTrue(ioe.getCause() instanceof EmptyPathError); + } + + List> addResults2 = + proto.addPathCacheDirectives(Arrays.asList( + new PathCacheDirective[] { + new PathCacheDirective("/alpha", "pool1"), + new PathCacheDirective("/theta", ""), + new 
PathCacheDirective("bogus", "pool1"), + new PathCacheDirective("/gamma", "pool1") + })); + long id = addResults2.get(0).get().getEntryId(); + Assert.assertEquals("expected to get back the same ID as last time " + + "when re-adding an existing path cache directive.", ids1[0], id); + try { + addResults2.get(1).get(); + Assert.fail("expected an error when adding a path cache " + + "directive with an empty pool name."); + } catch (IOException ioe) { + Assert.assertTrue(ioe.getCause() instanceof InvalidPoolNameError); + } + try { + addResults2.get(2).get(); + Assert.fail("expected an error when adding a path cache " + + "directive with a non-absolute path name."); + } catch (IOException ioe) { + Assert.assertTrue(ioe.getCause() instanceof InvalidPathNameError); + } + long ids2[] = new long[1]; + ids2[0] = addResults2.get(3).get().getEntryId(); + + RemoteIterator iter = + proto.listPathCacheEntries(0, "", 100); + validateListAll(iter, ids1[0], ids1[1], ids2[0]); + iter = proto.listPathCacheEntries(0, "", 1); + validateListAll(iter, ids1[0], ids1[1], ids2[0]); + iter = proto.listPathCacheEntries(0, "pool3", 1); + Assert.assertFalse(iter.hasNext()); + iter = proto.listPathCacheEntries(0, "pool2", 4444); + Assert.assertEquals(addResults1.get(1).get(), + iter.next()); + Assert.assertFalse(iter.hasNext()); + + List> removeResults1 = + proto.removePathCacheEntries(Arrays.asList( + new Long[] { ids1[1], -42L, 999999L })); + Assert.assertEquals(Long.valueOf(ids1[1]), + removeResults1.get(0).get()); + try { + removeResults1.get(1).get(); + Assert.fail("expected an error when removing a negative ID"); + } catch (IOException ioe) { + Assert.assertTrue(ioe.getCause() instanceof InvalidIdException); + } + try { + removeResults1.get(2).get(); + Assert.fail("expected an error when removing a nonexistent ID"); + } catch (IOException ioe) { + Assert.assertTrue(ioe.getCause() instanceof NoSuchIdException); + } + iter = proto.listPathCacheEntries(0, "pool2", 4444); + Assert.assertFalse(iter.hasNext()); + } finally { + if (cluster != null) { cluster.shutdown(); } + } + } +} From b992219fa13ccee2b417d91222fd0c3e8c3ffe11 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Sat, 24 Aug 2013 03:41:25 +0000 Subject: [PATCH 07/51] HDFS-5050. 
Add DataNode support for mlock and munlock (contributed by Andrew Wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1517106 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 + .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 + .../hdfs/server/datanode/BPOfferService.java | 8 + .../hdfs/server/datanode/BPServiceActor.java | 4 + .../hadoop/hdfs/server/datanode/DNConf.java | 4 + .../datanode/fsdataset/FsDatasetSpi.java | 22 ++ .../fsdataset/impl/FsDatasetCache.java | 240 ++++++++++++++++ .../fsdataset/impl/FsDatasetImpl.java | 108 +++++++ .../datanode/fsdataset/impl/FsVolumeImpl.java | 36 ++- .../fsdataset/impl/MappableBlock.java | 249 ++++++++++++++++ .../datanode/metrics/FSDatasetMBean.java | 15 + .../server/protocol/DatanodeProtocol.java | 2 + .../src/main/proto/DatanodeProtocol.proto | 4 +- .../src/main/resources/hdfs-default.xml | 23 ++ .../hadoop/hdfs/LogVerificationAppender.java | 11 + .../server/datanode/SimulatedFSDataset.java | 32 +++ .../server/datanode/TestFsDatasetCache.java | 266 ++++++++++++++++++ 17 files changed, 1027 insertions(+), 2 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index 0e1805ee3ed..e23a97eb174 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -15,6 +15,9 @@ HDFS-4949 (Unreleased) HDFS-5052. Add cacheRequest/uncacheRequest support to NameNode. (contributed by Colin Patrick McCabe) + HDFS-5050. 
Add DataNode support for mlock and munlock + (Andrew Wang via Colin Patrick McCabe) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 7d0fee4d11a..9302fdd80ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -100,6 +100,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final boolean DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT = false; public static final String DFS_DATANODE_MAX_LOCKED_MEMORY_KEY = "dfs.datanode.max.locked.memory"; public static final long DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT = 0; + public static final String DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_KEY = "dfs.datanode.fsdatasetcache.max.threads.per.volume"; + public static final int DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_DEFAULT = 4; public static final String DFS_NAMENODE_HTTP_PORT_KEY = "dfs.http.port"; public static final int DFS_NAMENODE_HTTP_PORT_DEFAULT = 50070; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java index cd95e4b97b1..64ca7270754 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java @@ -549,6 +549,14 @@ private boolean processCommandFromActive(DatanodeCommand cmd, } dn.metrics.incrBlocksRemoved(toDelete.length); break; + case DatanodeProtocol.DNA_CACHE: + LOG.info("DatanodeCommand action: DNA_CACHE"); + dn.getFSDataset().cache(bcmd.getBlockPoolId(), bcmd.getBlocks()); + break; + case DatanodeProtocol.DNA_UNCACHE: + LOG.info("DatanodeCommand action: DNA_UNCACHE"); + dn.getFSDataset().uncache(bcmd.getBlockPoolId(), bcmd.getBlocks()); + break; case DatanodeProtocol.DNA_SHUTDOWN: // TODO: DNA_SHUTDOWN appears to be unused - the NN never sends this command // See HDFS-2987. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index dbda8280851..9d561f9626c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -449,6 +449,10 @@ DatanodeCommand cacheReport() throws IOException { long startTime = Time.monotonicNow(); if (startTime - lastCacheReport > dnConf.cacheReportInterval) { // TODO: Implement me! 
+ String bpid = bpos.getBlockPoolId(); + BlockListAsLongs blocks = dn.getFSDataset().getCacheReport(bpid); + cmd = bpNamenode.cacheReport(bpRegistration, bpid, + blocks.getBlockListAsLongs()); } return cmd; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java index 8b21577b6a5..3aafeadcdcb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java @@ -160,4 +160,8 @@ String getMinimumNameNodeVersion() { public long getXceiverStopTimeout() { return xceiverStopTimeout; } + + public long getMaxLockedMemory() { + return maxLockedMemory; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java index 4f633973111..bf93f149fdf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java @@ -269,6 +269,14 @@ public void recoverClose(ExtendedBlock b, long newGS, long expectedBlockLen */ public BlockListAsLongs getBlockReport(String bpid); + /** + * Returns the cache report - the full list of cached blocks of a + * block pool + * @param bpid Block Pool Id + * @return - the cache report - the full list of cached blocks + */ + public BlockListAsLongs getCacheReport(String bpid); + /** Does the dataset contain the block? */ public boolean contains(ExtendedBlock block); @@ -294,6 +302,20 @@ public void recoverClose(ExtendedBlock b, long newGS, long expectedBlockLen */ public void invalidate(String bpid, Block invalidBlks[]) throws IOException; + /** + * Caches the specified blocks + * @param bpid Block pool id + * @param cacheBlks - block to cache + */ + public void cache(String bpid, Block[] cacheBlks); + + /** + * Uncaches the specified blocks + * @param bpid Block pool id + * @param uncacheBlks - blocks to uncache + */ + public void uncache(String bpid, Block[] uncacheBlks); + /** * Check if all the data directories are healthy * @throws DiskErrorException diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java new file mode 100644 index 00000000000..938189a5d84 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java @@ -0,0 +1,240 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; + +import java.io.FileInputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.CompletionService; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorCompletionService; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.commons.io.IOUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.ChecksumException; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.server.datanode.DataNode; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; + +/** + * Manages caching for an FsDatasetImpl by using the mmap(2) and mlock(2) + * system calls to lock blocks into memory. Block checksums are verified upon + * entry into the cache. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class FsDatasetCache { + + private static final Log LOG = LogFactory.getLog(FsDatasetCache.class); + + /** + * Map of cached blocks + */ + private final ConcurrentMap cachedBlocks; + + private final FsDatasetImpl dataset; + /** + * Number of cached bytes + */ + private AtomicLong usedBytes; + /** + * Total cache capacity in bytes + */ + private final long maxBytes; + + public FsDatasetCache(FsDatasetImpl dataset) { + this.dataset = dataset; + this.cachedBlocks = new ConcurrentHashMap(); + this.usedBytes = new AtomicLong(0); + this.maxBytes = dataset.datanode.getDnConf().getMaxLockedMemory(); + } + + /** + * @return if the block is cached + */ + boolean isCached(String bpid, Block block) { + MappableBlock mapBlock = cachedBlocks.get(block.getBlockId()); + if (mapBlock != null) { + return mapBlock.getBlockPoolId().equals(bpid); + } + return false; + } + + /** + * @return List of cached blocks suitable for translation into a + * {@link BlockListAsLongs} for a cache report. 
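The cacheBlock method further below reserves space against the locked-memory budget with a compare-and-set loop on an AtomicLong before handing the block to the worker pool. A minimal standalone sketch of that reservation pattern follows; the ByteBudget class and method names are illustrative, not part of the patch.

import java.util.concurrent.atomic.AtomicLong;

// Illustrative only: reserve and release bytes against a fixed budget
// without taking a lock, in the style of FsDatasetCache's usedBytes handling.
class ByteBudget {
  private final long maxBytes;
  private final AtomicLong usedBytes = new AtomicLong(0);

  ByteBudget(long maxBytes) {
    this.maxBytes = maxBytes;
  }

  /** Try to reserve {@code bytes}; returns false if the budget would be exceeded. */
  boolean reserve(long bytes) {
    while (true) {
      long used = usedBytes.get();
      if (used + bytes > maxBytes) {
        return false;                        // over budget, caller backs off
      }
      if (usedBytes.compareAndSet(used, used + bytes)) {
        return true;                         // reservation succeeded
      }
      // CAS lost a race with another thread; re-read and retry
    }
  }

  /** Return a previous reservation. */
  void release(long bytes) {
    usedBytes.addAndGet(-bytes);
  }
}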
+ */ + List getCachedBlocks(String bpid) { + List blocks = new ArrayList(); + MappableBlock mapBlock = null; + // ConcurrentHashMap iteration doesn't see latest updates, which is okay + for (Iterator it = cachedBlocks.values().iterator(); + it.hasNext(); mapBlock = it.next()) { + if (mapBlock.getBlockPoolId().equals(bpid)) { + blocks.add(mapBlock.getBlock()); + } + } + return blocks; + } + + /** + * Asynchronously attempts to cache a block. This is subject to the + * configured maximum locked memory limit. + * + * @param block block to cache + * @param volume volume of the block + * @param blockIn stream of the block's data file + * @param metaIn stream of the block's meta file + */ + void cacheBlock(String bpid, Block block, FsVolumeImpl volume, + FileInputStream blockIn, FileInputStream metaIn) { + if (isCached(bpid, block)) { + return; + } + MappableBlock mapBlock = null; + try { + mapBlock = new MappableBlock(bpid, block, volume, blockIn, metaIn); + } catch (IOException e) { + LOG.warn("Failed to cache replica " + block + ": Could not instantiate" + + " MappableBlock", e); + IOUtils.closeQuietly(blockIn); + IOUtils.closeQuietly(metaIn); + return; + } + // Check if there's sufficient cache capacity + boolean success = false; + long bytes = mapBlock.getNumBytes(); + long used = usedBytes.get(); + while (used+bytes < maxBytes) { + if (usedBytes.compareAndSet(used, used+bytes)) { + success = true; + break; + } + used = usedBytes.get(); + } + if (!success) { + LOG.warn(String.format( + "Failed to cache replica %s: %s exceeded (%d + %d > %d)", + mapBlock.getBlock().toString(), + DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, + used, bytes, maxBytes)); + mapBlock.close(); + return; + } + // Submit it to the worker pool to be cached + volume.getExecutor().execute(new WorkerTask(mapBlock)); + } + + /** + * Uncaches a block if it is cached. 
+ * @param block to uncache + */ + void uncacheBlock(String bpid, Block block) { + MappableBlock mapBlock = cachedBlocks.get(block.getBlockId()); + if (mapBlock != null && + mapBlock.getBlockPoolId().equals(bpid) && + mapBlock.getBlock().equals(block)) { + mapBlock.close(); + cachedBlocks.remove(mapBlock); + long bytes = mapBlock.getNumBytes(); + long used = usedBytes.get(); + while (!usedBytes.compareAndSet(used, used - bytes)) { + used = usedBytes.get(); + } + } + } + + /** + * Background worker that mmaps, mlocks, and checksums a block + */ + private class WorkerTask implements Runnable { + + private MappableBlock block; + WorkerTask(MappableBlock block) { + this.block = block; + } + + @Override + public void run() { + boolean success = false; + try { + block.map(); + block.lock(); + block.verifyChecksum(); + success = true; + } catch (ChecksumException e) { + // Exception message is bogus since this wasn't caused by a file read + LOG.warn("Failed to cache block " + block.getBlock() + ": Checksum " + + "verification failed."); + } catch (IOException e) { + LOG.warn("Failed to cache block " + block.getBlock() + ": IOException", + e); + } + // If we failed or the block became uncacheable in the meantime, + // clean up and return the reserved cache allocation + if (!success || + !dataset.validToCache(block.getBlockPoolId(), block.getBlock())) { + block.close(); + long used = usedBytes.get(); + while (!usedBytes.compareAndSet(used, used-block.getNumBytes())) { + used = usedBytes.get(); + } + } else { + cachedBlocks.put(block.getBlock().getBlockId(), block); + } + } + } + + // Stats related methods for FsDatasetMBean + + public long getCacheUsed() { + return usedBytes.get(); + } + + public long getCacheCapacity() { + return maxBytes; + } + + public long getCacheRemaining() { + return maxBytes - usedBytes.get(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index 2a210b2bc7c..f5e0c371136 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -37,6 +37,7 @@ import javax.management.ObjectName; import javax.management.StandardMBean; +import org.apache.commons.io.IOUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -170,6 +171,7 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b) final FsVolumeList volumes; final ReplicaMap volumeMap; final FsDatasetAsyncDiskService asyncDiskService; + final FsDatasetCache cacheManager; private final int validVolsRequired; // Used for synchronizing access to usage stats @@ -228,6 +230,7 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b) roots[idx] = storage.getStorageDir(idx).getCurrentDir(); } asyncDiskService = new FsDatasetAsyncDiskService(datanode, roots); + cacheManager = new FsDatasetCache(this); registerMBean(storage.getStorageID()); } @@ -287,6 +290,30 @@ public int getNumFailedVolumes() { return volumes.numberOfFailedVolumes(); } + /** + * Returns the total cache used by the datanode (in bytes). 
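These cache gauges are also exported through the FSDatasetMBean interface, so they can be read remotely. A hypothetical probe that reads them over JMX is sketched below; the ObjectName pattern and attribute names are assumptions based on standard Hadoop MBean naming conventions and are not defined by this patch.

import java.lang.management.ManagementFactory;
import java.util.Set;

import javax.management.MBeanServer;
import javax.management.ObjectName;

// Illustrative only: read the new cache gauges from the FSDatasetMBean.
// Run inside the DataNode JVM, or adapt to a remote JMXConnector.
public class CacheStatsProbe {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    Set<ObjectName> names = mbs.queryNames(
        new ObjectName("Hadoop:service=DataNode,name=FSDatasetState-*"), null);
    for (ObjectName name : names) {
      long used = (Long) mbs.getAttribute(name, "CacheUsed");
      long capacity = (Long) mbs.getAttribute(name, "CacheCapacity");
      long remaining = (Long) mbs.getAttribute(name, "CacheRemaining");
      System.out.printf("%s: used=%d capacity=%d remaining=%d%n",
          name, used, capacity, remaining);
    }
  }
}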
+ */ + @Override // FSDatasetMBean + public long getCacheUsed() { + return cacheManager.getCacheUsed(); + } + + /** + * Returns the total cache capacity of the datanode (in bytes). + */ + @Override // FSDatasetMBean + public long getCacheCapacity() { + return cacheManager.getCacheCapacity(); + } + + /** + * Returns the total amount of cache remaining (in bytes). + */ + @Override // FSDatasetMBean + public long getCacheRemaining() { + return cacheManager.getCacheRemaining(); + } + /** * Find the block's on-disk length */ @@ -534,6 +561,8 @@ public synchronized ReplicaInPipeline append(ExtendedBlock b, private synchronized ReplicaBeingWritten append(String bpid, FinalizedReplica replicaInfo, long newGS, long estimateBlockLen) throws IOException { + // uncache the block + cacheManager.uncacheBlock(bpid, replicaInfo); // unlink the finalized replica replicaInfo.unlinkBlock(1); @@ -1001,6 +1030,11 @@ public BlockListAsLongs getBlockReport(String bpid) { } } + @Override // FsDatasetSpi + public BlockListAsLongs getCacheReport(String bpid) { + return new BlockListAsLongs(cacheManager.getCachedBlocks(bpid), null); + } + /** * Get the list of finalized blocks from in-memory blockmap for a block pool. */ @@ -1143,6 +1177,8 @@ public void invalidate(String bpid, Block invalidBlks[]) throws IOException { volumeMap.remove(bpid, invalidBlks[i]); } + // Uncache the block synchronously + cacheManager.uncacheBlock(bpid, invalidBlks[i]); // Delete the block asynchronously to make sure we can do it fast enough asyncDiskService.deleteAsync(v, f, FsDatasetUtil.getMetaFile(f, invalidBlks[i].getGenerationStamp()), @@ -1153,6 +1189,78 @@ public void invalidate(String bpid, Block invalidBlks[]) throws IOException { } } + synchronized boolean validToCache(String bpid, Block blk) { + ReplicaInfo info = volumeMap.get(bpid, blk); + if (info == null) { + LOG.warn("Failed to cache replica " + blk + ": ReplicaInfo not found."); + return false; + } + FsVolumeImpl volume = (FsVolumeImpl)info.getVolume(); + if (volume == null) { + LOG.warn("Failed to cache replica " + blk + ": Volume not found."); + return false; + } + if (info.getState() != ReplicaState.FINALIZED) { + LOG.warn("Failed to cache replica " + blk + ": Replica is not" + + " finalized."); + return false; + } + return true; + } + + /** + * Asynchronously attempts to cache a single block via {@link FsDatasetCache}. 
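The asynchronous caching work is handed to a small per-volume worker pool that FsVolumeImpl sets up later in this patch. A condensed standalone sketch of that executor configuration is shown below; the class and thread-name strings are illustrative.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

// Illustrative only: a daemon worker pool of the kind FsVolumeImpl creates
// per volume for asynchronous block caching.
class CacheWorkerPool {
  static ThreadPoolExecutor create(String volumeName, int maxThreads) {
    ThreadFactory factory = new ThreadFactoryBuilder()
        .setDaemon(true)
        .setNameFormat("CacheWorker-" + volumeName + "-%d")
        .build();
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        1, maxThreads,                        // core and maximum pool size
        60, TimeUnit.SECONDS,                 // idle thread timeout
        new LinkedBlockingQueue<Runnable>(),  // unbounded work queue
        factory);
    // Let the core thread also exit when there is no caching work to do.
    pool.allowCoreThreadTimeOut(true);
    return pool;
  }
}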
+ */ + private void cacheBlock(String bpid, Block blk) { + ReplicaInfo info; + FsVolumeImpl volume; + synchronized (this) { + if (!validToCache(bpid, blk)) { + return; + } + info = volumeMap.get(bpid, blk); + volume = (FsVolumeImpl)info.getVolume(); + } + // Try to open block and meta streams + FileInputStream blockIn = null; + FileInputStream metaIn = null; + boolean success = false; + try { + ExtendedBlock extBlk = new ExtendedBlock(bpid, blk); + blockIn = (FileInputStream)getBlockInputStream(extBlk, 0); + metaIn = (FileInputStream)getMetaDataInputStream(extBlk) + .getWrappedStream(); + success = true; + } catch (ClassCastException e) { + LOG.warn("Failed to cache replica " + blk + ": Underlying blocks" + + " are not backed by files.", e); + } catch (IOException e) { + LOG.warn("Failed to cache replica " + blk + ": IOException while" + + " trying to open block or meta files.", e); + } + if (!success) { + IOUtils.closeQuietly(blockIn); + IOUtils.closeQuietly(metaIn); + return; + } + cacheManager.cacheBlock(bpid, blk, volume, blockIn, metaIn); + } + + @Override // FsDatasetSpi + public void cache(String bpid, Block[] cacheBlks) { + for (int i=0; i/current private final DF usage; private final long reserved; + /** + * Per-volume worker pool that processes new blocks to cache. + * The maximum number of workers per volume is bounded (configurable via + * dfs.datanode.fsdatasetcache.max.threads.per.volume) to limit resource + * contention. + */ + private final ThreadPoolExecutor cacheExecutor; FsVolumeImpl(FsDatasetImpl dataset, String storageID, File currentDir, Configuration conf) throws IOException { @@ -59,6 +74,20 @@ class FsVolumeImpl implements FsVolumeSpi { this.currentDir = currentDir; File parent = currentDir.getParentFile(); this.usage = new DF(parent, conf); + final int maxNumThreads = dataset.datanode.getConf().getInt( + DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_KEY, + DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_DEFAULT + ); + ThreadFactory workerFactory = new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat("FsVolumeImplWorker-" + parent.toString() + "-%d") + .build(); + cacheExecutor = new ThreadPoolExecutor( + 1, maxNumThreads, + 60, TimeUnit.SECONDS, + new LinkedBlockingQueue(), + workerFactory); + cacheExecutor.allowCoreThreadTimeOut(true); } File getCurrentDir() { @@ -166,7 +195,11 @@ File createRbwFile(String bpid, Block b) throws IOException { File addBlock(String bpid, Block b, File f) throws IOException { return getBlockPoolSlice(bpid).addBlock(b, f); } - + + Executor getExecutor() { + return cacheExecutor; + } + void checkDirs() throws DiskErrorException { // TODO:FEDERATION valid synchronization for(BlockPoolSlice s : bpSlices.values()) { @@ -210,6 +243,7 @@ public String toString() { } void shutdown() { + cacheExecutor.shutdown(); Set> set = bpSlices.entrySet(); for (Entry entry : set) { entry.getValue().shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java new file mode 100644 index 00000000000..de0bcd35d7b --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java @@ -0,0 +1,249 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; + +import java.io.BufferedInputStream; +import java.io.Closeable; +import java.io.DataInputStream; +import java.io.FileInputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.MappedByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.channels.FileChannel.MapMode; + +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.ChecksumException; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader; +import org.apache.hadoop.io.nativeio.NativeIO; +import org.apache.hadoop.util.DataChecksum; + +import com.google.common.base.Preconditions; + +/** + * Low-level wrapper for a Block and its backing files that provides mmap, + * mlock, and checksum verification operations. + * + * This could be a private class of FsDatasetCache, not meant for other users. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +class MappableBlock implements Closeable { + + private final String bpid; + private final Block block; + private final FsVolumeImpl volume; + + private final FileInputStream blockIn; + private final FileInputStream metaIn; + private final FileChannel blockChannel; + private final FileChannel metaChannel; + private final long blockSize; + + private boolean isMapped; + private boolean isLocked; + private boolean isChecksummed; + + private MappedByteBuffer blockMapped = null; + + public MappableBlock(String bpid, Block blk, FsVolumeImpl volume, + FileInputStream blockIn, FileInputStream metaIn) throws IOException { + this.bpid = bpid; + this.block = blk; + this.volume = volume; + + this.blockIn = blockIn; + this.metaIn = metaIn; + this.blockChannel = blockIn.getChannel(); + this.metaChannel = metaIn.getChannel(); + this.blockSize = blockChannel.size(); + + this.isMapped = false; + this.isLocked = false; + this.isChecksummed = false; + } + + public String getBlockPoolId() { + return bpid; + } + + public Block getBlock() { + return block; + } + + public FsVolumeImpl getVolume() { + return volume; + } + + public boolean isMapped() { + return isMapped; + } + + public boolean isLocked() { + return isLocked; + } + + public boolean isChecksummed() { + return isChecksummed; + } + + /** + * Returns the number of bytes on disk for the block file + */ + public long getNumBytes() { + return blockSize; + } + + /** + * Maps the block into memory. See mmap(2). + */ + public void map() throws IOException { + if (isMapped) { + return; + } + blockMapped = blockChannel.map(MapMode.READ_ONLY, 0, blockSize); + isMapped = true; + } + + /** + * Unmaps the block from memory. See munmap(2). 
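The map() and lock() methods above boil down to FileChannel.map followed by the NativeIO mlock wrapper. A condensed, hypothetical sketch of that sequence on an arbitrary file is shown below; the class name and file path are assumptions, and error handling and unmapping are trimmed.

import java.io.FileInputStream;
import java.io.IOException;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileChannel.MapMode;

import org.apache.hadoop.io.nativeio.NativeIO;

// Illustrative only: map a file read-only and pin the mapping in RAM,
// in the style of MappableBlock.map() followed by MappableBlock.lock().
public class MapAndLockSketch {
  public static MappedByteBuffer mapAndLock(String path) throws IOException {
    FileInputStream in = new FileInputStream(path);
    try {
      FileChannel channel = in.getChannel();
      long length = channel.size();
      MappedByteBuffer mapped = channel.map(MapMode.READ_ONLY, 0, length);
      // mlock(2) the mapped region so it cannot be paged out; requires the
      // native hadoop library and a sufficient RLIMIT_MEMLOCK.
      NativeIO.POSIX.mlock(mapped, length);
      return mapped;
    } finally {
      in.close();  // the mapping stays valid after the stream is closed
    }
  }
}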
+ */ + public void unmap() { + if (!isMapped) { + return; + } + if (blockMapped instanceof sun.nio.ch.DirectBuffer) { + sun.misc.Cleaner cleaner = + ((sun.nio.ch.DirectBuffer)blockMapped).cleaner(); + cleaner.clean(); + } + isMapped = false; + isLocked = false; + isChecksummed = false; + } + + /** + * Locks the block into memory. This prevents the block from being paged out. + * See mlock(2). + */ + public void lock() throws IOException { + Preconditions.checkArgument(isMapped, + "Block must be mapped before it can be locked!"); + if (isLocked) { + return; + } + NativeIO.POSIX.mlock(blockMapped, blockSize); + isLocked = true; + } + + /** + * Unlocks the block from memory, allowing it to be paged out. See munlock(2). + */ + public void unlock() throws IOException { + if (!isLocked || !isMapped) { + return; + } + NativeIO.POSIX.munlock(blockMapped, blockSize); + isLocked = false; + isChecksummed = false; + } + + /** + * Reads bytes into a buffer until EOF or the buffer's limit is reached + */ + private int fillBuffer(FileChannel channel, ByteBuffer buf) + throws IOException { + int bytesRead = channel.read(buf); + if (bytesRead < 0) { + //EOF + return bytesRead; + } + while (buf.remaining() > 0) { + int n = channel.read(buf); + if (n < 0) { + //EOF + return bytesRead; + } + bytesRead += n; + } + return bytesRead; + } + + /** + * Verifies the block's checksum. This is an I/O intensive operation. + * @return if the block was successfully checksummed. + */ + public void verifyChecksum() throws IOException, ChecksumException { + Preconditions.checkArgument(isLocked && isMapped, + "Block must be mapped and locked before checksum verification!"); + // skip if checksum has already been successfully verified + if (isChecksummed) { + return; + } + // Verify the checksum from the block's meta file + // Get the DataChecksum from the meta file header + metaChannel.position(0); + BlockMetadataHeader header = + BlockMetadataHeader.readHeader(new DataInputStream( + new BufferedInputStream(metaIn, BlockMetadataHeader + .getHeaderSize()))); + DataChecksum checksum = header.getChecksum(); + final int bytesPerChecksum = checksum.getBytesPerChecksum(); + final int checksumSize = checksum.getChecksumSize(); + final int numChunks = (8*1024*1024) / bytesPerChecksum; + ByteBuffer blockBuf = ByteBuffer.allocate(numChunks*bytesPerChecksum); + ByteBuffer checksumBuf = ByteBuffer.allocate(numChunks*checksumSize); + // Verify the checksum + int bytesVerified = 0; + while (bytesVerified < blockChannel.size()) { + Preconditions.checkState(bytesVerified % bytesPerChecksum == 0, + "Unexpected partial chunk before EOF"); + assert bytesVerified % bytesPerChecksum == 0; + int bytesRead = fillBuffer(blockChannel, blockBuf); + if (bytesRead == -1) { + throw new IOException("Premature EOF"); + } + blockBuf.flip(); + // Number of read chunks, including partial chunk at end + int chunks = (bytesRead+bytesPerChecksum-1) / bytesPerChecksum; + checksumBuf.limit(chunks*bytesPerChecksum); + fillBuffer(metaChannel, checksumBuf); + checksumBuf.flip(); + checksum.verifyChunkedSums(blockBuf, checksumBuf, block.getBlockName(), + bytesVerified); + // Success + bytesVerified += bytesRead; + blockBuf.clear(); + checksumBuf.clear(); + } + isChecksummed = true; + // Can close the backing file since everything is safely in memory + blockChannel.close(); + } + + @Override + public void close() { + unmap(); + IOUtils.closeQuietly(blockIn); + IOUtils.closeQuietly(metaIn); + } +} diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java index 2ff601df6e2..44325ce9bb8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java @@ -77,4 +77,19 @@ public interface FSDatasetMBean { * @return The number of failed volumes in the datanode. */ public int getNumFailedVolumes(); + + /** + * Returns the total cache used by the datanode (in bytes). + */ + public long getCacheUsed(); + + /** + * Returns the total cache capacity of the datanode (in bytes). + */ + public long getCacheCapacity(); + + /** + * Returns the total amount of cache remaining (in bytes). + */ + public long getCacheRemaining(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java index 0bdda59d642..39992754e7d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java @@ -74,6 +74,8 @@ public interface DatanodeProtocol { final static int DNA_RECOVERBLOCK = 6; // request a block recovery final static int DNA_ACCESSKEYUPDATE = 7; // update access key final static int DNA_BALANCERBANDWIDTHUPDATE = 8; // update balancer bandwidth + final static int DNA_CACHE = 9; // cache blocks + final static int DNA_UNCACHE = 10; // uncache blocks /** * Register Datanode. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto index 4c0c5a43a50..aeb4028de74 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto @@ -101,7 +101,9 @@ message BlockCommandProto { enum Action { TRANSFER = 1; // Transfer blocks to another datanode INVALIDATE = 2; // Invalidate blocks - SHUTDOWN = 3; // Shutdown the datanode + SHUTDOWN = 3; // Shutdown the datanode + CACHE = 4; // Cache blocks on the datanode + UNCACHE = 5; // Uncache blocks on the datanode } required Action action = 1; required string blockPoolId = 2; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index a2dcf8c347c..6e7a53debfd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -1419,4 +1419,27 @@ + + dfs.datanode.max.locked.memory + 0 + + The amount of memory in bytes to use for caching of block replicas in + memory on the datanode. The datanode's maximum locked memory soft ulimit + (RLIMIT_MEMLOCK) must be set to at least this value, else the datanode + will abort on startup. + + By default, this parameter set to 0, which disables in-memory caching. + + + + + dfs.datanode.fsdatasetcache.max.threads.per.volume + 4 + + The maximum number of threads per volume to use for caching new data + on the datanode. These threads consume both I/O and CPU. 
This can affect + normal datanode operations. + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java index d6698b88c4b..10ef47bbbc3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/LogVerificationAppender.java @@ -61,4 +61,15 @@ public int countExceptionsWithMessage(final String text) { } return count; } + + public int countLinesWithMessage(final String text) { + int count = 0; + for (LoggingEvent e: getLog()) { + String msg = e.getRenderedMessage(); + if (msg != null && msg.contains(text)) { + count++; + } + } + return count; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java index 274e5a3a6bd..d3a9135d214 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java @@ -465,6 +465,11 @@ public synchronized BlockListAsLongs getBlockReport(String bpid) { return new BlockListAsLongs(blocks, null); } + @Override // FsDatasetSpi + public BlockListAsLongs getCacheReport(String bpid) { + return new BlockListAsLongs(); + } + @Override // FSDatasetMBean public long getCapacity() { return storage.getCapacity(); @@ -490,6 +495,21 @@ public int getNumFailedVolumes() { return storage.getNumFailedVolumes(); } + @Override // FSDatasetMBean + public long getCacheUsed() { + return 0l; + } + + @Override // FSDatasetMBean + public long getCacheCapacity() { + return 0l; + } + + @Override // FSDatasetMBean + public long getCacheRemaining() { + return 0l; + } + @Override // FsDatasetSpi public synchronized long getLength(ExtendedBlock b) throws IOException { final Map map = getMap(b.getBlockPoolId()); @@ -559,6 +579,18 @@ public synchronized void invalidate(String bpid, Block[] invalidBlks) } } + @Override // FSDatasetSpi + public void cache(String bpid, Block[] cacheBlks) { + throw new UnsupportedOperationException( + "SimulatedFSDataset does not support cache operation!"); + } + + @Override // FSDatasetSpi + public void uncache(String bpid, Block[] uncacheBlks) { + throw new UnsupportedOperationException( + "SimulatedFSDataset does not support uncache operation!"); + } + private BInfo getBInfo(final ExtendedBlock b) { final Map map = blockMap.get(b.getBlockPoolId()); return map == null? null: map.get(b.getLocalBlock()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java new file mode 100644 index 00000000000..af7d323c8a5 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java @@ -0,0 +1,266 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.datanode; + +import static org.junit.Assert.assertEquals; +import static org.mockito.Matchers.*; +import static org.mockito.Mockito.doReturn; + +import java.io.FileInputStream; +import java.io.IOException; +import java.nio.channels.FileChannel; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.HdfsBlockLocation; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.LogVerificationAppender; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; +import org.apache.hadoop.hdfs.server.namenode.FSImage; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.protocol.BlockCommand; +import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; +import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; +import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; +import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat; +import org.apache.hadoop.hdfs.server.protocol.StorageReport; +import org.apache.log4j.Logger; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +public class TestFsDatasetCache { + + // Most Linux installs allow a default of 64KB locked memory + private static final long CACHE_CAPACITY = 64 * 1024; + private static final long BLOCK_SIZE = 4096; + + private static Configuration conf; + private static MiniDFSCluster cluster = null; + private static FileSystem fs; + private static NameNode nn; + private static FSImage fsImage; + private static DataNode dn; + private static FsDatasetSpi fsd; + private static DatanodeProtocolClientSideTranslatorPB spyNN; + + @Before + public void setUp() throws Exception { + conf = new HdfsConfiguration(); + conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); + conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, + CACHE_CAPACITY); + conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); + + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(1).build(); + cluster.waitActive(); + + fs = cluster.getFileSystem(); + nn = cluster.getNameNode(); + fsImage = nn.getFSImage(); + dn = cluster.getDataNodes().get(0); + fsd = dn.getFSDataset(); + + spyNN = DataNodeTestUtils.spyOnBposToNN(dn, nn); + } + + @After + public void tearDown() throws Exception { + if (cluster != null) { + cluster.shutdown(); + } + } + + private static void 
setHeartbeatResponse(DatanodeCommand[] cmds) + throws IOException { + HeartbeatResponse response = new HeartbeatResponse( + cmds, + new NNHAStatusHeartbeat(HAServiceState.ACTIVE, + fsImage.getLastAppliedOrWrittenTxId())); + doReturn(response).when(spyNN).sendHeartbeat( + (DatanodeRegistration) any(), + (StorageReport[]) any(), + anyInt(), anyInt(), anyInt()); + } + + private static DatanodeCommand[] cacheBlock(HdfsBlockLocation loc) { + return cacheBlocks(new HdfsBlockLocation[] {loc}); + } + + private static DatanodeCommand[] cacheBlocks(HdfsBlockLocation[] locs) { + return new DatanodeCommand[] { + getResponse(locs, DatanodeProtocol.DNA_CACHE) + }; + } + + private static DatanodeCommand[] uncacheBlock(HdfsBlockLocation loc) { + return uncacheBlocks(new HdfsBlockLocation[] {loc}); + } + + private static DatanodeCommand[] uncacheBlocks(HdfsBlockLocation[] locs) { + return new DatanodeCommand[] { + getResponse(locs, DatanodeProtocol.DNA_UNCACHE) + }; + } + + /** + * Creates a cache or uncache DatanodeCommand from an array of locations + */ + private static DatanodeCommand getResponse(HdfsBlockLocation[] locs, + int action) { + String bpid = locs[0].getLocatedBlock().getBlock().getBlockPoolId(); + Block[] blocks = new Block[locs.length]; + for (int i=0; i Date: Fri, 30 Aug 2013 22:15:51 +0000 Subject: [PATCH 08/51] HDFS-5141. Add cache status information to datanode heartbeat. (Contributed by Andrew Wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1519101 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 + .../hadoop/hdfs/protocol/DatanodeInfo.java | 81 ++++++++++++++++++- ...atanodeProtocolClientSideTranslatorPB.java | 8 +- ...atanodeProtocolServerSideTranslatorPB.java | 13 ++- .../hadoop/hdfs/protocolPB/PBHelper.java | 12 ++- .../blockmanagement/DatanodeDescriptor.java | 25 ++++-- .../blockmanagement/DatanodeManager.java | 7 +- .../blockmanagement/HeartbeatManager.java | 6 +- .../hdfs/server/datanode/BPServiceActor.java | 6 +- .../hdfs/server/namenode/FSNamesystem.java | 6 +- .../server/namenode/NameNodeRpcServer.java | 9 ++- .../server/protocol/DatanodeProtocol.java | 1 + .../org/apache/hadoop/hdfs/web/JsonUtil.java | 4 + .../src/main/proto/DatanodeProtocol.proto | 8 ++ .../hadoop-hdfs/src/main/proto/hdfs.proto | 2 + .../org/apache/hadoop/hdfs/DFSTestUtil.java | 2 +- .../blockmanagement/TestBlockManager.java | 4 +- .../TestOverReplicatedBlocks.java | 2 +- .../TestReplicationPolicy.java | 20 ++--- .../TestReplicationPolicyWithNodeGroup.java | 22 ++--- .../hdfs/server/common/TestJspHelper.java | 4 +- .../server/datanode/TestBPOfferService.java | 2 + .../server/datanode/TestBlockRecovery.java | 2 + .../server/datanode/TestFsDatasetCache.java | 2 + .../namenode/NNThroughputBenchmark.java | 7 +- .../hdfs/server/namenode/NameNodeAdapter.java | 3 +- .../server/namenode/TestDeadDatanode.java | 5 +- 27 files changed, 209 insertions(+), 57 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index e23a97eb174..769996d46a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -18,6 +18,9 @@ HDFS-4949 (Unreleased) HDFS-5050. Add DataNode support for mlock and munlock (Andrew Wang via Colin Patrick McCabe) + HDFS-5141. Add cache status information to datanode heartbeat. 
+ (Contributed by Andrew Wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java index 5172bc59f24..3964972c070 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java @@ -44,6 +44,8 @@ public class DatanodeInfo extends DatanodeID implements Node { private long dfsUsed; private long remaining; private long blockPoolUsed; + private long cacheCapacity; + private long cacheUsed; private long lastUpdate; private int xceiverCount; private String location = NetworkTopology.DEFAULT_RACK; @@ -81,6 +83,8 @@ public DatanodeInfo(DatanodeInfo from) { this.dfsUsed = from.getDfsUsed(); this.remaining = from.getRemaining(); this.blockPoolUsed = from.getBlockPoolUsed(); + this.cacheCapacity = from.getCacheCapacity(); + this.cacheUsed = from.getCacheUsed(); this.lastUpdate = from.getLastUpdate(); this.xceiverCount = from.getXceiverCount(); this.location = from.getNetworkLocation(); @@ -93,6 +97,8 @@ public DatanodeInfo(DatanodeID nodeID) { this.dfsUsed = 0L; this.remaining = 0L; this.blockPoolUsed = 0L; + this.cacheCapacity = 0L; + this.cacheUsed = 0L; this.lastUpdate = 0L; this.xceiverCount = 0; this.adminState = null; @@ -105,24 +111,29 @@ public DatanodeInfo(DatanodeID nodeID, String location) { public DatanodeInfo(DatanodeID nodeID, String location, final long capacity, final long dfsUsed, final long remaining, - final long blockPoolUsed, final long lastUpdate, final int xceiverCount, + final long blockPoolUsed, final long cacheCapacity, final long cacheUsed, + final long lastUpdate, final int xceiverCount, final AdminStates adminState) { this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getStorageID(), nodeID.getXferPort(), nodeID.getInfoPort(), nodeID.getIpcPort(), capacity, dfsUsed, remaining, - blockPoolUsed, lastUpdate, xceiverCount, location, adminState); + blockPoolUsed, cacheCapacity, cacheUsed, lastUpdate, xceiverCount, + location, adminState); } /** Constructor */ public DatanodeInfo(final String ipAddr, final String hostName, final String storageID, final int xferPort, final int infoPort, final int ipcPort, final long capacity, final long dfsUsed, final long remaining, - final long blockPoolUsed, final long lastUpdate, final int xceiverCount, + final long blockPoolUsed, final long cacheCapacity, final long cacheUsed, + final long lastUpdate, final int xceiverCount, final String networkLocation, final AdminStates adminState) { super(ipAddr, hostName, storageID, xferPort, infoPort, ipcPort); this.capacity = capacity; this.dfsUsed = dfsUsed; this.remaining = remaining; this.blockPoolUsed = blockPoolUsed; + this.cacheCapacity = cacheCapacity; + this.cacheUsed = cacheUsed; this.lastUpdate = lastUpdate; this.xceiverCount = xceiverCount; this.location = networkLocation; @@ -168,6 +179,42 @@ public float getRemainingPercent() { return DFSUtil.getPercentRemaining(remaining, capacity); } + /** + * @return Amount of cache capacity in bytes + */ + public long getCacheCapacity() { + return cacheCapacity; + } + + /** + * @return Amount of cache used in bytes + */ + public long getCacheUsed() { + return cacheUsed; + } + + /** + * @return Cache used as a percentage of the datanode's total cache capacity + */ + public float getCacheUsedPercent() { + return 
DFSUtil.getPercentUsed(cacheUsed, cacheCapacity); + } + + /** + * @return Amount of cache remaining in bytes + */ + public long getCacheRemaining() { + return cacheCapacity - cacheUsed; + } + + /** + * @return Cache remaining as a percentage of the datanode's total cache + * capacity + */ + public float getCacheRemainingPercent() { + return DFSUtil.getPercentRemaining(getCacheRemaining(), cacheCapacity); + } + /** The time when this information was accurate. */ public long getLastUpdate() { return lastUpdate; } @@ -194,6 +241,16 @@ public void setBlockPoolUsed(long bpUsed) { this.blockPoolUsed = bpUsed; } + /** Sets cache capacity. */ + public void setCacheCapacity(long cacheCapacity) { + this.cacheCapacity = cacheCapacity; + } + + /** Sets cache used. */ + public void setCacheUsed(long cacheUsed) { + this.cacheUsed = cacheUsed; + } + /** Sets time when this information was accurate. */ public void setLastUpdate(long lastUpdate) { this.lastUpdate = lastUpdate; @@ -223,6 +280,11 @@ public String getDatanodeReport() { long nonDFSUsed = getNonDfsUsed(); float usedPercent = getDfsUsedPercent(); float remainingPercent = getRemainingPercent(); + long cc = getCacheCapacity(); + long cr = getCacheRemaining(); + long cu = getCacheUsed(); + float cacheUsedPercent = getCacheUsedPercent(); + float cacheRemainingPercent = getCacheRemainingPercent(); String lookupName = NetUtils.getHostNameOfIP(getName()); buffer.append("Name: "+ getName()); @@ -249,6 +311,12 @@ public String getDatanodeReport() { buffer.append("DFS Remaining: " +r+ " ("+StringUtils.byteDesc(r)+")"+"\n"); buffer.append("DFS Used%: "+percent2String(usedPercent) + "\n"); buffer.append("DFS Remaining%: "+percent2String(remainingPercent) + "\n"); + buffer.append("Configured Cache Capacity: "+c+" ("+StringUtils.byteDesc(cc)+")"+"\n"); + buffer.append("Cache Used: "+cu+" ("+StringUtils.byteDesc(u)+")"+"\n"); + buffer.append("Cache Remaining: " +cr+ " ("+StringUtils.byteDesc(r)+")"+"\n"); + buffer.append("Cache Used%: "+percent2String(cacheUsedPercent) + "\n"); + buffer.append("Cache Remaining%: "+percent2String(cacheRemainingPercent) + "\n"); + buffer.append("Last contact: "+new Date(lastUpdate)+"\n"); return buffer.toString(); } @@ -259,6 +327,9 @@ public String dumpDatanode() { long c = getCapacity(); long r = getRemaining(); long u = getDfsUsed(); + long cc = getCacheCapacity(); + long cr = getCacheRemaining(); + long cu = getCacheUsed(); buffer.append(getName()); if (!NetworkTopology.DEFAULT_RACK.equals(location)) { buffer.append(" "+location); @@ -274,6 +345,10 @@ public String dumpDatanode() { buffer.append(" " + u + "(" + StringUtils.byteDesc(u)+")"); buffer.append(" " + percent2String(u/(double)c)); buffer.append(" " + r + "(" + StringUtils.byteDesc(r)+")"); + buffer.append(" " + cc + "(" + StringUtils.byteDesc(cc)+")"); + buffer.append(" " + cu + "(" + StringUtils.byteDesc(cu)+")"); + buffer.append(" " + percent2String(cu/(double)cc)); + buffer.append(" " + cr + "(" + StringUtils.byteDesc(cr)+")"); buffer.append(" " + new Date(lastUpdate)); return buffer.toString(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java index cf3921cf12e..1578d24e908 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto; import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.protocol.CacheReport; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; @@ -155,8 +156,8 @@ public DatanodeRegistration registerDatanode(DatanodeRegistration registration @Override public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration, - StorageReport[] reports, int xmitsInProgress, int xceiverCount, - int failedVolumes) throws IOException { + StorageReport[] reports, CacheReport[] cacheReports, int xmitsInProgress, + int xceiverCount, int failedVolumes) throws IOException { HeartbeatRequestProto.Builder builder = HeartbeatRequestProto.newBuilder() .setRegistration(PBHelper.convert(registration)) .setXmitsInProgress(xmitsInProgress).setXceiverCount(xceiverCount) @@ -164,6 +165,9 @@ public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration, for (StorageReport r : reports) { builder.addReports(PBHelper.convert(r)); } + for (CacheReport r : cacheReports) { + builder.addCacheReports(PBHelper.convert(r)); + } HeartbeatResponseProto resp; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java index 78bfe1f2c01..8aafcc36154 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto; @@ -47,6 +48,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto; +import org.apache.hadoop.hdfs.server.protocol.CacheReport; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; @@ -111,9 +113,16 @@ public HeartbeatResponseProto sendHeartbeat(RpcController controller, p.getCapacity(), p.getDfsUsed(), p.getRemaining(), p.getBlockPoolUsed()); } + List cacheList = request.getCacheReportsList(); + CacheReport[] cacheReport = 
new CacheReport[list.size()]; + i = 0; + for (CacheReportProto p : cacheList) { + cacheReport[i++] = new CacheReport(p.getCacheCapacity(), + p.getCacheUsed()); + } response = impl.sendHeartbeat(PBHelper.convert(request.getRegistration()), - report, request.getXmitsInProgress(), request.getXceiverCount(), - request.getFailedVolumes()); + report, cacheReport, request.getXmitsInProgress(), + request.getXceiverCount(), request.getFailedVolumes()); } catch (IOException e) { throw new ServiceException(e); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 613edb1fa18..4051d01e031 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto; +import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto; @@ -121,6 +122,7 @@ import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; +import org.apache.hadoop.hdfs.server.protocol.CacheReport; import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; @@ -469,7 +471,8 @@ static public DatanodeInfo convert(DatanodeInfoProto di) { PBHelper.convert(di.getId()), di.hasLocation() ? di.getLocation() : null , di.getCapacity(), di.getDfsUsed(), di.getRemaining(), - di.getBlockPoolUsed() , di.getLastUpdate() , di.getXceiverCount() , + di.getBlockPoolUsed(), di.getCacheCapacity(), di.getCacheUsed(), + di.getLastUpdate(), di.getXceiverCount(), PBHelper.convert(di.getAdminState())); } @@ -1361,6 +1364,13 @@ public static StorageReportProto convert(StorageReport r) { .setStorageID(r.getStorageID()).build(); } + public static CacheReportProto convert(CacheReport r) { + return CacheReportProto.newBuilder() + .setCacheCapacity(r.getCapacity()) + .setCacheUsed(r.getUsed()) + .build(); + } + public static JournalInfo convert(JournalInfoProto info) { int lv = info.hasLayoutVersion() ? info.getLayoutVersion() : 0; int nsID = info.hasNamespaceID() ? 
info.getNamespaceID() : 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java index c542ae343e1..713a156cc46 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java @@ -159,7 +159,7 @@ synchronized void clear() { * @param nodeID id of the data node */ public DatanodeDescriptor(DatanodeID nodeID) { - this(nodeID, 0L, 0L, 0L, 0L, 0, 0); + this(nodeID, 0L, 0L, 0L, 0L, 0L, 0L, 0, 0); } /** @@ -169,7 +169,7 @@ public DatanodeDescriptor(DatanodeID nodeID) { */ public DatanodeDescriptor(DatanodeID nodeID, String networkLocation) { - this(nodeID, networkLocation, 0L, 0L, 0L, 0L, 0, 0); + this(nodeID, networkLocation, 0L, 0L, 0L, 0L, 0L, 0L, 0, 0); } /** @@ -179,6 +179,8 @@ public DatanodeDescriptor(DatanodeID nodeID, * @param dfsUsed space used by the data node * @param remaining remaining capacity of the data node * @param bpused space used by the block pool corresponding to this namenode + * @param cacheCapacity cache capacity of the data node + * @param cacheUsed cache used on the data node * @param xceiverCount # of data transfers at the data node */ public DatanodeDescriptor(DatanodeID nodeID, @@ -186,11 +188,13 @@ public DatanodeDescriptor(DatanodeID nodeID, long dfsUsed, long remaining, long bpused, + long cacheCapacity, + long cacheUsed, int xceiverCount, int failedVolumes) { super(nodeID); - updateHeartbeat(capacity, dfsUsed, remaining, bpused, xceiverCount, - failedVolumes); + updateHeartbeat(capacity, dfsUsed, remaining, bpused, cacheCapacity, + cacheUsed, xceiverCount, failedVolumes); } /** @@ -201,6 +205,8 @@ public DatanodeDescriptor(DatanodeID nodeID, * @param dfsUsed the used space by dfs datanode * @param remaining remaining capacity of the data node * @param bpused space used by the block pool corresponding to this namenode + * @param cacheCapacity cache capacity of the data node + * @param cacheUsed cache used on the data node * @param xceiverCount # of data transfers at the data node */ public DatanodeDescriptor(DatanodeID nodeID, @@ -209,11 +215,13 @@ public DatanodeDescriptor(DatanodeID nodeID, long dfsUsed, long remaining, long bpused, + long cacheCapacity, + long cacheUsed, int xceiverCount, int failedVolumes) { super(nodeID, networkLocation); - updateHeartbeat(capacity, dfsUsed, remaining, bpused, xceiverCount, - failedVolumes); + updateHeartbeat(capacity, dfsUsed, remaining, bpused, cacheCapacity, + cacheUsed, xceiverCount, failedVolumes); } /** @@ -302,11 +310,14 @@ public int numBlocks() { * Updates stats from datanode heartbeat. 
*/ public void updateHeartbeat(long capacity, long dfsUsed, long remaining, - long blockPoolUsed, int xceiverCount, int volFailures) { + long blockPoolUsed, long cacheCapacity, long cacheUsed, int xceiverCount, + int volFailures) { setCapacity(capacity); setRemaining(remaining); setBlockPoolUsed(blockPoolUsed); setDfsUsed(dfsUsed); + setCacheCapacity(cacheCapacity); + setCacheUsed(cacheUsed); setXceiverCount(xceiverCount); setLastUpdate(Time.now()); this.volumeFailures = volFailures; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 9d5024fb9b1..52858139001 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -1145,8 +1145,8 @@ private void setDatanodeDead(DatanodeDescriptor node) { public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg, final String blockPoolId, long capacity, long dfsUsed, long remaining, long blockPoolUsed, - int xceiverCount, int maxTransfers, int failedVolumes - ) throws IOException { + long cacheCapacity, long cacheUsed, int xceiverCount, int maxTransfers, + int failedVolumes) throws IOException { synchronized (heartbeatManager) { synchronized (datanodeMap) { DatanodeDescriptor nodeinfo = null; @@ -1167,7 +1167,8 @@ public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg, } heartbeatManager.updateHeartbeat(nodeinfo, capacity, dfsUsed, - remaining, blockPoolUsed, xceiverCount, failedVolumes); + remaining, blockPoolUsed, cacheCapacity, cacheUsed, xceiverCount, + failedVolumes); // If we are in safemode, do not send back any recovery / replication // requests. Don't even drain the existing queue of work. 
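For reference, the shape of the extended heartbeat call introduced by this patch is sketched below. This is a minimal, hypothetical fragment rather than code from the patch itself: reg (a DatanodeRegistration), nn (a DatanodeProtocol proxy), fsd (an FsDatasetSpi) and bpid (a block pool id) are assumed names; the actual call sites are BPServiceActor.sendHeartBeat and NNThroughputBenchmark further down in this patch.

    // Storage report, unchanged by this patch.
    StorageReport[] reports = { new StorageReport(reg.getStorageID(), false,
        fsd.getCapacity(), fsd.getDfsUsed(), fsd.getRemaining(),
        fsd.getBlockPoolUsed(bpid)) };
    // New: the datanode also reports its cache capacity and current cache usage.
    CacheReport[] cacheReports = { new CacheReport(fsd.getCacheCapacity(),
        fsd.getCacheUsed()) };
    HeartbeatResponse resp = nn.sendHeartbeat(reg, reports, cacheReports,
        0 /* xmitsInProgress */, 0 /* xceiverCount */, 0 /* failedVolumes */);
    // The NameNode records the cache stats via DatanodeManager.handleHeartbeat
    // and may return cache commands (DNA_CACHE / DNA_UNCACHE) among the usual ones.
    DatanodeCommand[] cmds = resp.getCommands();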
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java index 0bff1bf52f7..f9c28e99692 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java @@ -170,7 +170,7 @@ synchronized void register(final DatanodeDescriptor d) { addDatanode(d); //update its timestamp - d.updateHeartbeat(0L, 0L, 0L, 0L, 0, 0); + d.updateHeartbeat(0L, 0L, 0L, 0L, 0L, 0L, 0, 0); } } @@ -193,10 +193,10 @@ synchronized void removeDatanode(DatanodeDescriptor node) { synchronized void updateHeartbeat(final DatanodeDescriptor node, long capacity, long dfsUsed, long remaining, long blockPoolUsed, - int xceiverCount, int failedVolumes) { + long cacheCapacity, long cacheUsed, int xceiverCount, int failedVolumes) { stats.subtract(node); node.updateHeartbeat(capacity, dfsUsed, remaining, blockPoolUsed, - xceiverCount, failedVolumes); + cacheCapacity, cacheUsed, xceiverCount, failedVolumes); stats.add(node); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index 9d561f9626c..81207d37cbc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.protocol.CacheReport; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; @@ -468,7 +469,10 @@ HeartbeatResponse sendHeartBeat() throws IOException { dn.getFSDataset().getDfsUsed(), dn.getFSDataset().getRemaining(), dn.getFSDataset().getBlockPoolUsed(bpos.getBlockPoolId())) }; - return bpNamenode.sendHeartbeat(bpRegistration, report, + CacheReport[] cacheReport = { new CacheReport( + dn.getFSDataset().getCacheCapacity(), + dn.getFSDataset().getCacheUsed()) }; + return bpNamenode.sendHeartbeat(bpRegistration, report, cacheReport, dn.getXmitsInProgress(), dn.getXceiverCount(), dn.getFSDataset().getNumFailedVolumes()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index b93d75c1256..9e817629d10 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -3962,15 +3962,15 @@ String getRegistrationID() { */ HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg, long capacity, long dfsUsed, long remaining, long blockPoolUsed, - int xceiverCount, int xmitsInProgress, int failedVolumes) - throws IOException { + long cacheCapacity, long 
cacheUsed, int xceiverCount, int xmitsInProgress, + int failedVolumes) throws IOException { readLock(); try { final int maxTransfer = blockManager.getMaxReplicationStreams() - xmitsInProgress; DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat( nodeReg, blockPoolId, capacity, dfsUsed, remaining, blockPoolUsed, - xceiverCount, maxTransfer, failedVolumes); + cacheCapacity, cacheUsed, xceiverCount, maxTransfer, failedVolumes); return new HeartbeatResponse(cmds, createHaStatusHeartbeat()); } finally { readUnlock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 875f81642e5..b96df2a6aa0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -100,6 +100,7 @@ import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; +import org.apache.hadoop.hdfs.server.protocol.CacheReport; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; @@ -935,13 +936,13 @@ public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg) @Override // DatanodeProtocol public HeartbeatResponse sendHeartbeat(DatanodeRegistration nodeReg, - StorageReport[] report, int xmitsInProgress, int xceiverCount, - int failedVolumes) throws IOException { + StorageReport[] report, CacheReport[] cacheReport, int xmitsInProgress, + int xceiverCount, int failedVolumes) throws IOException { verifyRequest(nodeReg); return namesystem.handleHeartbeat(nodeReg, report[0].getCapacity(), report[0].getDfsUsed(), report[0].getRemaining(), - report[0].getBlockPoolUsed(), xceiverCount, xmitsInProgress, - failedVolumes); + report[0].getBlockPoolUsed(), cacheReport[0].getCapacity(), + cacheReport[0].getUsed(), xceiverCount, xmitsInProgress, failedVolumes); } @Override // DatanodeProtocol diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java index 39992754e7d..9e74967dacd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java @@ -106,6 +106,7 @@ public DatanodeRegistration registerDatanode(DatanodeRegistration registration @Idempotent public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration, StorageReport[] reports, + CacheReport[] cacheReports, int xmitsInProgress, int xceiverCount, int failedVolumes) throws IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java index 840087393cd..208f285a9b8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java @@ -301,6 +301,8 @@ private static Map toJsonMap(final DatanodeInfo datanodeinfo) { m.put("dfsUsed", datanodeinfo.getDfsUsed()); m.put("remaining", datanodeinfo.getRemaining()); m.put("blockPoolUsed", datanodeinfo.getBlockPoolUsed()); + m.put("cacheCapacity", datanodeinfo.getCacheCapacity()); + m.put("cacheUsed", datanodeinfo.getCacheUsed()); m.put("lastUpdate", datanodeinfo.getLastUpdate()); m.put("xceiverCount", datanodeinfo.getXceiverCount()); m.put("networkLocation", datanodeinfo.getNetworkLocation()); @@ -326,6 +328,8 @@ private static DatanodeInfo toDatanodeInfo(final Map m) { (Long)m.get("dfsUsed"), (Long)m.get("remaining"), (Long)m.get("blockPoolUsed"), + (Long)m.get("cacheCapacity"), + (Long)m.get("cacheUsed"), (Long)m.get("lastUpdate"), (int)(long)(Long)m.get("xceiverCount"), (String)m.get("networkLocation"), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto index aeb4028de74..3dcf9bb648b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto @@ -164,6 +164,8 @@ message RegisterDatanodeResponseProto { * xmitsInProgress - number of transfers from this datanode to others * xceiverCount - number of active transceiver threads * failedVolumes - number of failed volumes + * cacheCapacity - total cache capacity available at the datanode + * cacheUsed - amount of cache used */ message HeartbeatRequestProto { required DatanodeRegistrationProto registration = 1; // Datanode info @@ -171,6 +173,7 @@ message HeartbeatRequestProto { optional uint32 xmitsInProgress = 3 [ default = 0 ]; optional uint32 xceiverCount = 4 [ default = 0 ]; optional uint32 failedVolumes = 5 [ default = 0 ]; + repeated CacheReportProto cacheReports = 6; } message StorageReportProto { @@ -182,6 +185,11 @@ message StorageReportProto { optional uint64 blockPoolUsed = 6 [ default = 0 ]; } +message CacheReportProto { + optional uint64 cacheCapacity = 1 [default = 0 ]; + optional uint64 cacheUsed = 2 [default = 0 ]; +} + /** * state - State the NN is in when returning response to the DN * txid - Highest transaction ID this NN has seen diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto index 085f629a8d3..60e1e1f087e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto @@ -82,6 +82,8 @@ message DatanodeInfoProto { } optional AdminState adminState = 10 [default = NORMAL]; + optional uint64 cacheCapacity = 11 [default = 0]; + optional uint64 cacheUsed = 12 [default = 0]; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index f0c10b0a2fe..c033f37ac18 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -847,7 +847,7 @@ public static DatanodeInfo getLocalDatanodeInfo(String ipAddr, DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT, DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT, DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT, - 1, 2, 3, 4, 5, 6, "local", adminState); + 1l, 2l, 3l, 4l, 0l, 0l, 5, 6, "local", 
adminState); } public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index e88ec92e39b..89a46efaf4c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -98,7 +98,9 @@ private void addNodes(Iterable nodesToAdd) { cluster.add(dn); dn.updateHeartbeat( 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0); + 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, + 0L, 0L, + 0, 0); bm.getDatanodeManager().checkIfClusterIsNowMultiRack(dn); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java index 79785961c91..7148b8271b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java @@ -104,7 +104,7 @@ public void testProcesOverReplicateBlock() throws Exception { String corruptMachineName = corruptDataNode.getXferAddr(); for (DatanodeDescriptor datanode : hm.getDatanodes()) { if (!corruptMachineName.equals(datanode.getXferAddr())) { - datanode.updateHeartbeat(100L, 100L, 0L, 100L, 0, 0); + datanode.updateHeartbeat(100L, 100L, 0L, 100L, 0L, 0L, 0, 0); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index ba6c3737266..a284a09f740 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -116,7 +116,7 @@ public static void setupCluster() throws Exception { for (int i=0; i < NUM_OF_DATANODES; i++) { dataNodes[i].updateHeartbeat( 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0); + 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); } } @@ -133,7 +133,8 @@ public static void setupCluster() throws Exception { public void testChooseTarget1() throws Exception { dataNodes[0].updateHeartbeat( 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 4, 0); // overloaded + HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, + 0L, 0L, 4, 0); // overloaded DatanodeDescriptor[] targets; targets = replicator.chooseTarget(filename, 0, dataNodes[0], @@ -168,7 +169,7 @@ public void testChooseTarget1() throws Exception { dataNodes[0].updateHeartbeat( 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0); + HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); } private static DatanodeDescriptor[] chooseTarget( 
@@ -271,7 +272,8 @@ public void testChooseTarget3() throws Exception { // make data node 0 to be not qualified to choose dataNodes[0].updateHeartbeat( 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0); // no space + (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, + 0L, 0L, 0, 0); // no space DatanodeDescriptor[] targets; targets = replicator.chooseTarget(filename, 0, dataNodes[0], @@ -309,7 +311,7 @@ public void testChooseTarget3() throws Exception { dataNodes[0].updateHeartbeat( 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0); + HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); } /** @@ -326,7 +328,7 @@ public void testChoooseTarget4() throws Exception { for(int i=0; i<2; i++) { dataNodes[i].updateHeartbeat( 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0); + (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); } DatanodeDescriptor[] targets; @@ -358,7 +360,7 @@ public void testChoooseTarget4() throws Exception { for(int i=0; i<2; i++) { dataNodes[i].updateHeartbeat( 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0); + HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); } } @@ -424,7 +426,7 @@ public void testChooseTargetWithMoreThanAvailableNodes() throws Exception { for(int i=0; i<2; i++) { dataNodes[i].updateHeartbeat( 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0); + (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); } final LogVerificationAppender appender = new LogVerificationAppender(); @@ -451,7 +453,7 @@ public void testChooseTargetWithMoreThanAvailableNodes() throws Exception { for(int i=0; i<2; i++) { dataNodes[i].updateHeartbeat( 2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, - HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0); + HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java index 032c2c08396..3e2d60e4e17 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java @@ -129,7 +129,7 @@ private static void setupDataNodeCapacity() { for(int i=0; i chosenNodes = new ArrayList(); chosenNodes.add(dataNodesInBoundaryCase[0]); @@ -651,7 +653,7 @@ public void testChooseMoreTargetsThanNodeGroups() throws Exception { for(int i=0; i live = new ArrayList(); live.add(dnDesc1); live.add(dnDesc2); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java index 42ea48230ee..c351b3468c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics; import org.apache.hadoop.hdfs.server.protocol.BlockCommand; +import org.apache.hadoop.hdfs.server.protocol.CacheReport; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; @@ -126,6 +127,7 @@ private DatanodeProtocolClientSideTranslatorPB setupNNMock(int nnIdx) .when(mock).sendHeartbeat( Mockito.any(DatanodeRegistration.class), Mockito.any(StorageReport[].class), + Mockito.any(CacheReport[].class), Mockito.anyInt(), Mockito.anyInt(), Mockito.anyInt()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java index a5792ad217f..d45f3fba504 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java @@ -67,6 +67,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; +import org.apache.hadoop.hdfs.server.protocol.CacheReport; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; @@ -154,6 +155,7 @@ public DatanodeRegistration answer(InvocationOnMock invocation) when(namenode.sendHeartbeat( Mockito.any(DatanodeRegistration.class), Mockito.any(StorageReport[].class), + Mockito.any(CacheReport[].class), Mockito.anyInt(), Mockito.anyInt(), Mockito.anyInt())) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java index af7d323c8a5..6d67670783c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImage; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.BlockCommand; +import org.apache.hadoop.hdfs.server.protocol.CacheReport; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; @@ -105,6 +106,7 @@ private static void setHeartbeatResponse(DatanodeCommand[] cmds) doReturn(response).when(spyNN).sendHeartbeat( (DatanodeRegistration) any(), (StorageReport[]) any(), + (CacheReport[]) any(), anyInt(), anyInt(), anyInt()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java index 77f8560816f..0d195941aeb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataStorage; import org.apache.hadoop.hdfs.server.protocol.BlockCommand; +import org.apache.hadoop.hdfs.server.protocol.CacheReport; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; @@ -863,8 +864,9 @@ void sendHeartbeat() throws IOException { // TODO:FEDERATION currently a single block pool is supported StorageReport[] rep = { new StorageReport(dnRegistration.getStorageID(), false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) }; + CacheReport[] cacheRep = { new CacheReport(0l, 0l) }; DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration, - rep, 0, 0, 0).getCommands(); + rep, cacheRep, 0, 0, 0).getCommands(); if(cmds != null) { for (DatanodeCommand cmd : cmds ) { if(LOG.isDebugEnabled()) { @@ -910,8 +912,9 @@ int replicateBlocks() throws IOException { // register datanode StorageReport[] rep = { new StorageReport(dnRegistration.getStorageID(), false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) }; + CacheReport[] cacheRep = { new CacheReport(0l, 0l) }; DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration, - rep, 0, 0, 0).getCommands(); + rep, cacheRep, 0, 0, 0).getCommands(); if (cmds != null) { for (DatanodeCommand cmd : cmds) { if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java index cf64c335bac..b83adecd194 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java @@ -111,7 +111,8 @@ public static DelegationTokenSecretManager getDtSecretManager( public static HeartbeatResponse sendHeartBeat(DatanodeRegistration nodeReg, DatanodeDescriptor dd, FSNamesystem namesystem) throws IOException { return namesystem.handleHeartbeat(nodeReg, dd.getCapacity(), - dd.getDfsUsed(), dd.getRemaining(), dd.getBlockPoolUsed(), 0, 0, 0); + dd.getDfsUsed(), dd.getRemaining(), dd.getBlockPoolUsed(), + dd.getCacheCapacity(), dd.getCacheRemaining(), 0, 0, 0); } public static boolean setReplication(final FSNamesystem ns, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java index d78198ab402..ddcedcf44f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import 
org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; +import org.apache.hadoop.hdfs.server.protocol.CacheReport; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; @@ -142,7 +143,9 @@ public void testDeadDatanode() throws Exception { // that asks datanode to register again StorageReport[] rep = { new StorageReport(reg.getStorageID(), false, 0, 0, 0, 0) }; - DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0, 0, 0).getCommands(); + CacheReport[] cacheRep = { new CacheReport(0l, 0l) }; + DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, cacheRep, 0, 0, 0) + .getCommands(); assertEquals(1, cmd.length); assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER .getAction()); From fde0e615f26a1e237246383ca49c1445ff5719ae Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Tue, 3 Sep 2013 19:56:31 +0000 Subject: [PATCH 09/51] Add missing file from HDFS-5141 git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1519819 13f79535-47bb-0310-9956-ffa450edef68 --- .../hdfs/server/protocol/CacheReport.java | 44 +++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/CacheReport.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/CacheReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/CacheReport.java new file mode 100644 index 00000000000..14e2f77a733 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/CacheReport.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.protocol; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Utilization report for a Datanode cache + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class CacheReport { + private final long capacity; + private final long used; + + public CacheReport(long capacity, long used) { + this.capacity = capacity; + this.used = used; + } + + public long getCapacity() { + return capacity; + } + + public long getUsed() { + return used; + } +} From 97b7267977ef42201e5844df49bc37ec3d10ce16 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Tue, 3 Sep 2013 20:38:45 +0000 Subject: [PATCH 10/51] HDFS-5121. Add RPCs for creating and manipulating cache pools. 
(Contributed by Colin Patrick McCabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1519841 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop/fs/BatchedRemoteIterator.java | 121 ++++++++++ .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 4 + .../AddPathCacheDirectiveException.java | 10 + .../hadoop/hdfs/protocol/CachePoolInfo.java | 89 +++++++ .../hadoop/hdfs/protocol/ClientProtocol.java | 50 +++- .../hdfs/protocol/PathCacheDirective.java | 5 +- .../RemovePathCacheEntryException.java | 10 + ...amenodeProtocolServerSideTranslatorPB.java | 128 ++++++++++- .../ClientNamenodeProtocolTranslatorPB.java | 217 ++++++++++++++---- .../hdfs/server/namenode/CacheManager.java | 168 +++++++++++++- .../hdfs/server/namenode/CachePool.java | 141 ++++++++++++ .../hdfs/server/namenode/FSNamesystem.java | 199 +++++++++++++++- .../server/namenode/FSPermissionChecker.java | 36 +++ .../server/namenode/NameNodeRpcServer.java | 106 +++++---- .../main/proto/ClientNamenodeProtocol.proto | 58 ++++- .../namenode/TestPathCacheRequests.java | 96 +++++++- 16 files changed, 1310 insertions(+), 128 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchedRemoteIterator.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchedRemoteIterator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchedRemoteIterator.java new file mode 100644 index 00000000000..4c682c6b18b --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchedRemoteIterator.java @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.io.IOException; +import java.util.List; +import java.util.NoSuchElementException; + +/** + * A RemoteIterator that fetches elements in batches. 
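+ * Subclasses supply makeRequest(K, int), which fetches at most
+ * maxRepliesPerRequest elements after a given key, and elementToNextKey(E),
+ * which derives the continuation key from the last element returned; callers
+ * only see the usual RemoteIterator hasNext()/next() contract.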
+ */ +public abstract class BatchedRemoteIterator implements RemoteIterator { + public interface BatchedEntries { + public E get(int i); + public int size(); + } + + public static class BatchedListEntries implements BatchedEntries { + private final List entries; + + public BatchedListEntries(List entries) { + this.entries = entries; + } + + public E get(int i) { + return entries.get(i); + + } + + public int size() { + return entries.size(); + } + } + + private K nextKey; + private final int maxRepliesPerRequest; + private BatchedEntries entries; + private int idx; + + public BatchedRemoteIterator(K nextKey, int maxRepliesPerRequest) { + this.nextKey = nextKey; + this.maxRepliesPerRequest = maxRepliesPerRequest; + this.entries = null; + this.idx = -1; + } + + /** + * Perform the actual remote request. + * + * @param key The key to send. + * @param maxRepliesPerRequest The maximum number of replies to allow. + * @return A list of replies. + */ + public abstract BatchedEntries makeRequest(K nextKey, int maxRepliesPerRequest) + throws IOException; + + private void makeRequest() throws IOException { + idx = 0; + entries = null; + entries = makeRequest(nextKey, maxRepliesPerRequest); + if (entries.size() > maxRepliesPerRequest) { + throw new IOException("invalid number of replies returned: got " + + entries.size() + ", expected " + maxRepliesPerRequest + + " at most."); + } + if (entries.size() == 0) { + entries = null; + } + } + + private void makeRequestIfNeeded() throws IOException { + if (idx == -1) { + makeRequest(); + } else if ((entries != null) && (idx >= entries.size())) { + if (entries.size() < maxRepliesPerRequest) { + // Last time, we got fewer entries than requested. + // So we should be at the end. + entries = null; + } else { + makeRequest(); + } + } + } + + @Override + public boolean hasNext() throws IOException { + makeRequestIfNeeded(); + return (entries != null); + } + + /** + * Return the next list key associated with an element. + */ + public abstract K elementToNextKey(E element); + + @Override + public E next() throws IOException { + makeRequestIfNeeded(); + if (entries == null) { + throw new NoSuchElementException(); + } + E entry = entries.get(idx++); + nextKey = elementToNextKey(entry); + return entry; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index 769996d46a8..acc949680f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -21,6 +21,10 @@ HDFS-4949 (Unreleased) HDFS-5141. Add cache status information to datanode heartbeat. (Contributed by Andrew Wang) + HDFS-5121. Add RPCs for creating and manipulating cache pools. 
+ (Contributed by Colin Patrick McCabe) + + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathCacheDirectiveException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathCacheDirectiveException.java index 3e0531c20c8..e162463d8d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathCacheDirectiveException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathCacheDirectiveException.java @@ -65,6 +65,16 @@ public InvalidPoolNameError(PathCacheDirective directive) { } } + public static class PoolWritePermissionDeniedError + extends AddPathCacheDirectiveException { + private static final long serialVersionUID = 1L; + + public PoolWritePermissionDeniedError(PathCacheDirective directive) { + super("write permission denied for pool '" + directive.getPool() + "'", + directive); + } + } + public static class UnexpectedAddPathCacheDirectiveException extends AddPathCacheDirectiveException { private static final long serialVersionUID = 1L; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java new file mode 100644 index 00000000000..20006059a7a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.protocol; + +import javax.annotation.Nullable; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Information about a cache pool. 
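+ * Cache pools are referenced by name from path cache directives. Apart from
+ * the pool name, all fields (owner name, group name, mode and weight) are
+ * optional and may be null.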
+ */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class CachePoolInfo { + final String poolName; + + @Nullable + String ownerName; + + @Nullable + String groupName; + + @Nullable + Integer mode; + + @Nullable + Integer weight; + + public CachePoolInfo(String poolName) { + this.poolName = poolName; + } + + public String getPoolName() { + return poolName; + } + + public String getOwnerName() { + return ownerName; + } + + public CachePoolInfo setOwnerName(String ownerName) { + this.ownerName = ownerName; + return this; + } + + public String getGroupName() { + return groupName; + } + + public CachePoolInfo setGroupName(String groupName) { + this.groupName = groupName; + return this; + } + + public Integer getMode() { + return mode; + } + + public CachePoolInfo setMode(Integer mode) { + this.mode = mode; + return this; + } + + public Integer getWeight() { + return weight; + } + + public CachePoolInfo setWeight(Integer weight) { + this.weight = weight; + return this; + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 165d0673f47..f07c950d215 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -1144,5 +1144,53 @@ public List> removePathCacheEntries(List ids) @Idempotent public RemoteIterator listPathCacheEntries(long prevId, String pool, int maxRepliesPerRequest) throws IOException; -} + + /** + * Modify a cache pool. + * + * @param req + * The request to modify a cache pool. + * @throws IOException + * If the request could not be completed. + */ + @AtMostOnce + public void addCachePool(CachePoolInfo info) throws IOException; + /** + * Modify a cache pool. + * + * @param req + * The request to modify a cache pool. + * @throws IOException + * If the request could not be completed. + */ + @Idempotent + public void modifyCachePool(CachePoolInfo req) throws IOException; + + /** + * Remove a cache pool. + * + * @param cachePoolName + * Name of the cache pool to remove. + * @throws IOException + * if the cache pool did not exist, or could not be removed. + */ + @AtMostOnce + public void removeCachePool(String cachePoolName) throws IOException; + + /** + * List some cache pools. + * + * @param prevKey + * The previous key we listed. We will list keys greater than this. + * @param maxRepliesPerRequest + * Maximum number of cache pools to list. + * @return A remote iterator from which you can get CachePool objects. + * Requests will be made as needed. + * @throws IOException + * If there was an error listing cache pools. 
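With these ClientProtocol additions, a caller can manage pools end to end. The following is a hedged client-side sketch, not part of the patch: the helper class, pool attributes, and batch size are invented, and "namenode" stands for any ClientProtocol proxy such as the PB translator later in this patch.

    import java.io.IOException;

    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
    import org.apache.hadoop.hdfs.protocol.ClientProtocol;

    class CachePoolAdminSketch {
      /** Creates a pool, lists all pools in batches of 64, then removes it. */
      static void demo(ClientProtocol namenode) throws IOException {
        namenode.addCachePool(new CachePoolInfo("pool1")
            .setOwnerName("alice")
            .setGroupName("staff")
            .setMode(0755)
            .setWeight(100));
        RemoteIterator<CachePoolInfo> pools = namenode.listCachePools("", 64);
        while (pools.hasNext()) {
          System.out.println(pools.next().getPoolName());
        }
        namenode.removeCachePool("pool1");
      }
    }

Because CachePoolInfo's setters return this, callers can chain only the attributes they care about and leave the rest unset.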
+ */ + @Idempotent + public RemoteIterator listCachePools(String prevKey, + int maxRepliesPerRequest) throws IOException; +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheDirective.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheDirective.java index 8045186a6c2..8c6d742d4cd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheDirective.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheDirective.java @@ -36,7 +36,7 @@ public class PathCacheDirective implements Comparable { private final String pool; - public PathCacheDirective(String path, String pool) throws IOException { + public PathCacheDirective(String path, String pool) { Preconditions.checkNotNull(path); Preconditions.checkNotNull(pool); this.path = path; @@ -67,10 +67,9 @@ public void validate() throws IOException { if (path.isEmpty()) { throw new EmptyPathError(this); } - if (DFSUtil.isValidName(path)) { + if (!DFSUtil.isValidName(path)) { throw new InvalidPathNameError(this); } - if (pool.isEmpty()) { throw new InvalidPoolNameError(this); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathCacheEntryException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathCacheEntryException.java index 41f7269cdd1..04e88dfe6c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathCacheEntryException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathCacheEntryException.java @@ -47,6 +47,16 @@ public InvalidIdException(long entryId) { } } + public final static class RemovePermissionDeniedException + extends RemovePathCacheEntryException { + private static final long serialVersionUID = 1L; + + public RemovePermissionDeniedException(long entryId) { + super("permission denied when trying to remove path cache entry id " + + entryId, entryId); + } + } + public final static class NoSuchIdException extends RemovePathCacheEntryException { private static final long serialVersionUID = 1L; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index c02bcecbe61..f9a5bfbc914 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -27,26 +27,29 @@ import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.RemoteIterator; -import org.apache.hadoop.hdfs.protocol.PathCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathCacheEntry; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; +import 
org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.PathCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathCacheEntry; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectiveErrorProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectivesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectivesResponseProto; @@ -77,8 +80,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.Builder; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto; @@ -105,22 +108,30 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto; +import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesElementProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntryErrorProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntryErrorProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto; @@ -160,6 +171,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.hdfs.server.namenode.CachePool; import org.apache.hadoop.hdfs.server.namenode.INodeId; import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException; import org.apache.hadoop.io.Text; @@ -1081,6 +1093,9 @@ public RemovePathCacheEntriesResponseProto removePathCacheEntries( } catch (NoSuchIdException ioe) { builder.addResults(RemovePathCacheEntryErrorProto. 
NO_SUCH_CACHED_PATH_ID_ERROR_VALUE); + } catch (RemovePermissionDeniedException ioe) { + builder.addResults(RemovePathCacheEntryErrorProto. + REMOVE_PERMISSION_DENIED_ERROR_VALUE); } catch (IOException ioe) { builder.addResults(RemovePathCacheEntryErrorProto. UNEXPECTED_REMOVE_ERROR_VALUE); @@ -1115,4 +1130,99 @@ public ListPathCacheEntriesResponseProto listPathCacheEntries(RpcController cont throw new ServiceException(e); } } + + @Override + public AddCachePoolResponseProto addCachePool(RpcController controller, + AddCachePoolRequestProto request) throws ServiceException { + try { + CachePoolInfo info = + new CachePoolInfo(request.getPoolName()); + if (request.hasOwnerName()) { + info.setOwnerName(request.getOwnerName()); + } + if (request.hasGroupName()) { + info.setGroupName(request.getGroupName()); + } + if (request.hasMode()) { + info.setMode(request.getMode()); + } + if (request.hasWeight()) { + info.setWeight(request.getWeight()); + } + server.addCachePool(info); + return AddCachePoolResponseProto.newBuilder().build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public ModifyCachePoolResponseProto modifyCachePool(RpcController controller, + ModifyCachePoolRequestProto request) throws ServiceException { + try { + CachePoolInfo info = + new CachePoolInfo(request.getPoolName()); + if (request.hasOwnerName()) { + info.setOwnerName(request.getOwnerName()); + } + if (request.hasGroupName()) { + info.setGroupName(request.getGroupName()); + } + if (request.hasMode()) { + info.setMode(request.getMode()); + } + if (request.hasWeight()) { + info.setWeight(request.getWeight()); + } + server.modifyCachePool(info); + return ModifyCachePoolResponseProto.newBuilder().build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public RemoveCachePoolResponseProto removeCachePool(RpcController controller, + RemoveCachePoolRequestProto request) throws ServiceException { + try { + server.removeCachePool(request.getPoolName()); + return RemoveCachePoolResponseProto.newBuilder().build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public ListCachePoolsResponseProto listCachePools(RpcController controller, + ListCachePoolsRequestProto request) throws ServiceException { + try { + RemoteIterator iter = + server.listCachePools(request.getPrevPoolName(), + request.getMaxReplies()); + ListCachePoolsResponseProto.Builder responseBuilder = + ListCachePoolsResponseProto.newBuilder(); + while (iter.hasNext()) { + CachePoolInfo pool = iter.next(); + ListCachePoolsResponseElementProto.Builder elemBuilder = + ListCachePoolsResponseElementProto.newBuilder(); + elemBuilder.setPoolName(pool.getPoolName()); + if (pool.getOwnerName() != null) { + elemBuilder.setOwnerName(pool.getOwnerName()); + } + if (pool.getGroupName() != null) { + elemBuilder.setGroupName(pool.getGroupName()); + } + if (pool.getMode() != null) { + elemBuilder.setMode(pool.getMode()); + } + if (pool.getWeight() != null) { + elemBuilder.setWeight(pool.getWeight()); + } + responseBuilder.addElements(elemBuilder.build()); + } + return responseBuilder.build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index eb9845e849b..4b8687e1d99 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -27,6 +27,8 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.BatchedRemoteIterator; +import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FileAlreadyExistsException; @@ -45,6 +47,7 @@ import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.UnexpectedAddPathCacheDirectiveException; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; @@ -58,11 +61,13 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectiveErrorProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectivesRequestProto; @@ -108,14 +113,19 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto; import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntryErrorProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto; @@ -1064,6 +1074,9 @@ private static IOException removePathCacheEntriesError(long code, long id) { } else if (code == RemovePathCacheEntryErrorProto. NO_SUCH_CACHED_PATH_ID_ERROR_VALUE) { return new NoSuchIdException(id); + } else if (code == RemovePathCacheEntryErrorProto. + REMOVE_PERMISSION_DENIED_ERROR_VALUE) { + return new RemovePermissionDeniedException(id); } else { return new UnexpectedRemovePathCacheEntryException(id); } @@ -1098,32 +1111,49 @@ public List> removePathCacheEntries(List ids) } } - private class PathCacheEntriesIterator - implements RemoteIterator { - private long prevId; - private final String pool; - private final int repliesPerRequest; + private static class BatchedPathCacheEntries + implements BatchedEntries { private ListPathCacheEntriesResponseProto response; - private int idx; - public PathCacheEntriesIterator(long prevId, String pool, - int repliesPerRequest) { - this.prevId = prevId; - this.pool = pool; - this.repliesPerRequest = repliesPerRequest; - this.response = null; - this.idx = -1; + BatchedPathCacheEntries(ListPathCacheEntriesResponseProto response) { + this.response = response; } - private void makeRequest() throws IOException { - idx = 0; - response = null; + @Override + public PathCacheEntry get(int i) { + ListPathCacheEntriesElementProto elementProto = + response.getElements(i); + return new PathCacheEntry(elementProto.getId(), + new PathCacheDirective(elementProto.getPath(), + elementProto.getPool())); + } + + @Override + public int size() { + return response.getElementsCount(); + } + } + + private class PathCacheEntriesIterator + extends BatchedRemoteIterator { + private final String pool; + + public PathCacheEntriesIterator(long prevKey, int maxRepliesPerRequest, + String pool) { + super(prevKey, maxRepliesPerRequest); + this.pool = pool; + } + + @Override + public BatchedEntries makeRequest( + Long nextKey, int maxRepliesPerRequest) throws IOException { + ListPathCacheEntriesResponseProto response; try { ListPathCacheEntriesRequestProto req = ListPathCacheEntriesRequestProto.newBuilder(). - setPrevId(prevId). + setPrevId(nextKey). setPool(pool). - setMaxReplies(repliesPerRequest). + setMaxReplies(maxRepliesPerRequest). 
build(); response = rpcProxy.listPathCacheEntries(null, req); if (response.getElementsCount() == 0) { @@ -1132,45 +1162,134 @@ private void makeRequest() throws IOException { } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } - } - - private void makeRequestIfNeeded() throws IOException { - if (idx == -1) { - makeRequest(); - } else if ((response != null) && (idx >= response.getElementsCount())) { - if (response.getHasMore()) { - makeRequest(); - } else { - response = null; - } - } + return new BatchedPathCacheEntries(response); } @Override - public boolean hasNext() throws IOException { - makeRequestIfNeeded(); - return (response != null); - } - - @Override - public PathCacheEntry next() throws IOException { - makeRequestIfNeeded(); - if (response == null) { - throw new NoSuchElementException(); - } - ListPathCacheEntriesElementProto elementProto = - response.getElements(idx); - prevId = elementProto.getId(); - idx++; - return new PathCacheEntry(elementProto.getId(), - new PathCacheDirective(elementProto.getPath(), - elementProto.getPool())); + public Long elementToNextKey(PathCacheEntry element) { + return element.getEntryId(); } } @Override public RemoteIterator listPathCacheEntries(long prevId, String pool, int repliesPerRequest) throws IOException { - return new PathCacheEntriesIterator(prevId, pool, repliesPerRequest); + return new PathCacheEntriesIterator(prevId, repliesPerRequest, pool); + } + + @Override + public void addCachePool(CachePoolInfo info) throws IOException { + AddCachePoolRequestProto.Builder builder = + AddCachePoolRequestProto.newBuilder(); + builder.setPoolName(info.getPoolName()); + if (info.getOwnerName() != null) { + builder.setOwnerName(info.getOwnerName()); + } + if (info.getGroupName() != null) { + builder.setGroupName(info.getGroupName()); + } + if (info.getMode() != null) { + builder.setMode(info.getMode()); + } + if (info.getWeight() != null) { + builder.setWeight(info.getWeight()); + } + try { + rpcProxy.addCachePool(null, builder.build()); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + @Override + public void modifyCachePool(CachePoolInfo req) throws IOException { + ModifyCachePoolRequestProto.Builder builder = + ModifyCachePoolRequestProto.newBuilder(); + builder.setPoolName(req.getPoolName()); + if (req.getOwnerName() != null) { + builder.setOwnerName(req.getOwnerName()); + } + if (req.getGroupName() != null) { + builder.setGroupName(req.getGroupName()); + } + if (req.getMode() != null) { + builder.setMode(req.getMode()); + } + if (req.getWeight() != null) { + builder.setWeight(req.getWeight()); + } + try { + rpcProxy.modifyCachePool(null, builder.build()); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + @Override + public void removeCachePool(String cachePoolName) throws IOException { + try { + rpcProxy.removeCachePool(null, + RemoveCachePoolRequestProto.newBuilder(). + setPoolName(cachePoolName).build()); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + private static class BatchedPathDirectiveEntries + implements BatchedEntries { + private final ListCachePoolsResponseProto proto; + + public BatchedPathDirectiveEntries(ListCachePoolsResponseProto proto) { + this.proto = proto; + } + + @Override + public CachePoolInfo get(int i) { + ListCachePoolsResponseElementProto elem = proto.getElements(i); + return new CachePoolInfo(elem.getPoolName()). + setOwnerName(elem.getOwnerName()). 
+ setGroupName(elem.getGroupName()). + setMode(elem.getMode()). + setWeight(elem.getWeight()); + } + + @Override + public int size() { + return proto.getElementsCount(); + } + } + + private class CachePoolIterator + extends BatchedRemoteIterator { + + public CachePoolIterator(String prevKey, int maxRepliesPerRequest) { + super(prevKey, maxRepliesPerRequest); + } + + @Override + public BatchedEntries makeRequest(String prevKey, + int maxRepliesPerRequest) throws IOException { + try { + return new BatchedPathDirectiveEntries( + rpcProxy.listCachePools(null, + ListCachePoolsRequestProto.newBuilder(). + setPrevPoolName(prevKey). + setMaxReplies(maxRepliesPerRequest).build())); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + + @Override + public String elementToNextKey(CachePoolInfo element) { + return element.getPoolName(); + } + } + + @Override + public RemoteIterator listCachePools(String prevKey, + int maxRepliesPerRequest) throws IOException { + return new CachePoolIterator(prevKey, maxRepliesPerRequest); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index 8be575a9701..06475802c61 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -27,12 +27,17 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.PathCacheDirective; import org.apache.hadoop.hdfs.protocol.PathCacheEntry; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.UnexpectedAddPathCacheDirectiveException; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.PoolWritePermissionDeniedError; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException; import org.apache.hadoop.util.Fallible; /** @@ -56,6 +61,12 @@ final class CacheManager { private final TreeMap entriesByDirective = new TreeMap(); + /** + * Cache pools, sorted by name. + */ + private final TreeMap cachePools = + new TreeMap(); + /** * The entry ID to use for a new entry. 
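The cachePools map above is keyed and sorted by pool name, which is what makes prevKey-based listing cheap: later in this patch, listCachePools walks cachePools.tailMap(prevKey, false). A stand-alone sketch of that pattern follows, using toy types rather than CachePool, with the batch explicitly capped at maxReplies for the illustration.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.SortedMap;
    import java.util.TreeMap;

    /** Toy illustration of prevKey paging over a name-sorted TreeMap. */
    class NamePagingSketch {
      private final TreeMap<String, String> poolsByName =
          new TreeMap<String, String>();

      List<String> listAfter(String prevKey, int maxReplies) {
        List<String> results = new ArrayList<String>(maxReplies);
        // tailMap(prevKey, false) yields entries with keys strictly greater
        // than prevKey, so passing "" starts from the beginning.
        SortedMap<String, String> tail = poolsByName.tailMap(prevKey, false);
        for (String name : tail.keySet()) {
          if (results.size() >= maxReplies) {
            break;
          }
          results.add(name);
        }
        return results;
      }
    }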
*/ @@ -80,16 +91,31 @@ synchronized long getNextEntryId() throws IOException { } private synchronized Fallible addDirective( - PathCacheDirective directive) { + PathCacheDirective directive, FSPermissionChecker pc) { + CachePool pool = cachePools.get(directive.getPool()); + if (pool == null) { + LOG.info("addDirective " + directive + ": pool not found."); + return new Fallible( + new InvalidPoolNameError(directive)); + } + if (!pc.checkWritePermission(pool.getOwnerName(), + pool.getGroupName(), pool.getMode())) { + LOG.info("addDirective " + directive + ": write permission denied."); + return new Fallible( + new PoolWritePermissionDeniedError(directive)); + } try { directive.validate(); } catch (IOException ioe) { + LOG.info("addDirective " + directive + ": validation failed."); return new Fallible(ioe); } // Check if we already have this entry. PathCacheEntry existing = entriesByDirective.get(directive); if (existing != null) { // Entry already exists: return existing entry. + LOG.info("addDirective " + directive + ": there is an " + + "existing directive " + existing); return new Fallible(existing); } // Add a new entry with the next available ID. @@ -100,33 +126,57 @@ private synchronized Fallible addDirective( return new Fallible( new UnexpectedAddPathCacheDirectiveException(directive)); } + LOG.info("addDirective " + directive + ": added cache directive " + + directive); entriesByDirective.put(directive, entry); entriesById.put(entry.getEntryId(), entry); return new Fallible(entry); } public synchronized List> addDirectives( - List directives) { + List directives, FSPermissionChecker pc) { ArrayList> results = new ArrayList>(directives.size()); for (PathCacheDirective directive: directives) { - results.add(addDirective(directive)); + results.add(addDirective(directive, pc)); } return results; } - private synchronized Fallible removeEntry(long entryId) { + private synchronized Fallible removeEntry(long entryId, + FSPermissionChecker pc) { // Check for invalid IDs. if (entryId <= 0) { + LOG.info("removeEntry " + entryId + ": invalid non-positive entry ID."); return new Fallible(new InvalidIdException(entryId)); } // Find the entry. PathCacheEntry existing = entriesById.get(entryId); if (existing == null) { + LOG.info("removeEntry " + entryId + ": entry not found."); return new Fallible(new NoSuchIdException(entryId)); } + CachePool pool = cachePools.get(existing.getDirective().getPool()); + if (pool == null) { + LOG.info("removeEntry " + entryId + ": pool not found for directive " + + existing.getDirective()); + return new Fallible( + new UnexpectedRemovePathCacheEntryException(entryId)); + } + if (!pc.isSuperUser()) { + if (!pc.checkWritePermission(pool.getOwnerName(), + pool.getGroupName(), pool.getMode())) { + LOG.info("removeEntry " + entryId + ": write permission denied to " + + "pool " + pool + " for entry " + existing); + return new Fallible( + new RemovePermissionDeniedException(entryId)); + } + } + // Remove the corresponding entry in entriesByDirective. 
if (entriesByDirective.remove(existing.getDirective()) == null) { + LOG.warn("removeEntry " + entryId + ": failed to find existing entry " + + existing + " in entriesByDirective"); return new Fallible( new UnexpectedRemovePathCacheEntryException(entryId)); } @@ -134,11 +184,12 @@ private synchronized Fallible removeEntry(long entryId) { return new Fallible(entryId); } - public synchronized List> removeEntries(List entryIds) { + public synchronized List> removeEntries(List entryIds, + FSPermissionChecker pc) { ArrayList> results = new ArrayList>(entryIds.size()); for (Long entryId : entryIds) { - results.add(removeEntry(entryId)); + results.add(removeEntry(entryId, pc)); } return results; } @@ -162,4 +213,109 @@ public synchronized List listPathCacheEntries(long prevId, } return replies; } + + /** + * Create a cache pool. + * + * Only the superuser should be able to call this function. + * + * @param info + * The info for the cache pool to create. + */ + public synchronized void addCachePool(CachePoolInfo info) + throws IOException { + String poolName = info.getPoolName(); + if (poolName.isEmpty()) { + throw new IOException("invalid empty cache pool name"); + } + CachePool pool = cachePools.get(poolName); + if (pool != null) { + throw new IOException("cache pool " + poolName + " already exists."); + } + CachePool cachePool = new CachePool(poolName, + info.getOwnerName(), info.getGroupName(), info.getMode(), + info.getWeight()); + cachePools.put(poolName, cachePool); + LOG.info("created new cache pool " + cachePool); + } + + /** + * Modify a cache pool. + * + * Only the superuser should be able to call this function. + * + * @param info + * The info for the cache pool to modify. + */ + public synchronized void modifyCachePool(CachePoolInfo info) + throws IOException { + String poolName = info.getPoolName(); + if (poolName.isEmpty()) { + throw new IOException("invalid empty cache pool name"); + } + CachePool pool = cachePools.get(poolName); + if (pool == null) { + throw new IOException("cache pool " + poolName + " does not exist."); + } + StringBuilder bld = new StringBuilder(); + String prefix = ""; + if (info.getOwnerName() != null) { + pool.setOwnerName(info.getOwnerName()); + bld.append(prefix). + append("set owner to ").append(info.getOwnerName()); + prefix = "; "; + } + if (info.getGroupName() != null) { + pool.setGroupName(info.getGroupName()); + bld.append(prefix). + append("set group to ").append(info.getGroupName()); + prefix = "; "; + } + if (info.getMode() != null) { + pool.setMode(info.getMode()); + bld.append(prefix). + append(String.format("set mode to 0%3o", info.getMode())); + prefix = "; "; + } + if (info.getWeight() != null) { + pool.setWeight(info.getWeight()); + bld.append(prefix). + append("set weight to ").append(info.getWeight()); + prefix = "; "; + } + if (prefix.isEmpty()) { + bld.append("no changes."); + } + LOG.info("modified " + poolName + "; " + bld.toString()); + } + + /** + * Remove a cache pool. + * + * Only the superuser should be able to call this function. + * + * @param poolName + * The name for the cache pool to remove. 
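Because modifyCachePool above applies only the fields that are non-null on the incoming CachePoolInfo, a caller can update one attribute without re-sending the others. A hypothetical client-side example (pool name and weight are invented):

    import java.io.IOException;

    import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
    import org.apache.hadoop.hdfs.protocol.ClientProtocol;

    class ModifyPoolSketch {
      /** Bumps only the weight of "pool1"; owner, group, and mode stay as-is. */
      static void bumpWeight(ClientProtocol namenode) throws IOException {
        namenode.modifyCachePool(new CachePoolInfo("pool1").setWeight(200));
      }
    }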
+ */ + public synchronized void removeCachePool(String poolName) + throws IOException { + CachePool pool = cachePools.remove(poolName); + if (pool == null) { + throw new IOException("can't remove nonexistent cache pool " + poolName); + } + } + + public synchronized List + listCachePools(FSPermissionChecker pc, String prevKey, + int maxRepliesPerRequest) { + final int MAX_PREALLOCATED_REPLIES = 16; + ArrayList results = + new ArrayList(Math.min(MAX_PREALLOCATED_REPLIES, + maxRepliesPerRequest)); + SortedMap tailMap = cachePools.tailMap(prevKey, false); + for (Entry cur : tailMap.entrySet()) { + results.add(cur.getValue().getInfo(pc)); + } + return results; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java new file mode 100644 index 00000000000..8a8f30b8121 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java @@ -0,0 +1,141 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.io.IOException; + +import javax.annotation.Nonnull; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; +import org.apache.hadoop.security.UserGroupInformation; + +/** + * The NameNode uses CachePools to manage cache resources on the DataNodes. + */ +public final class CachePool { + public static final Log LOG = LogFactory.getLog(CachePool.class); + + @Nonnull + private final String poolName; + + @Nonnull + private String ownerName; + + @Nonnull + private String groupName; + + private int mode; + + private int weight; + + public static String getCurrentUserPrimaryGroupName() throws IOException { + UserGroupInformation ugi= NameNode.getRemoteUser(); + String[] groups = ugi.getGroupNames(); + if (groups.length == 0) { + throw new IOException("failed to get group names from UGI " + ugi); + } + return groups[0]; + } + + public CachePool(String poolName, String ownerName, String groupName, + Integer mode, Integer weight) throws IOException { + this.poolName = poolName; + this.ownerName = ownerName != null ? ownerName : + NameNode.getRemoteUser().getShortUserName(); + this.groupName = groupName != null ? groupName : + getCurrentUserPrimaryGroupName(); + this.mode = mode != null ? mode : 0644; + this.weight = weight != null ? 
weight : 100; + } + + public String getName() { + return poolName; + } + + public String getOwnerName() { + return ownerName; + } + + public CachePool setOwnerName(String ownerName) { + this.ownerName = ownerName; + return this; + } + + public String getGroupName() { + return groupName; + } + + public CachePool setGroupName(String groupName) { + this.groupName = groupName; + return this; + } + + public int getMode() { + return mode; + } + + public CachePool setMode(int mode) { + this.mode = mode; + return this; + } + + public int getWeight() { + return weight; + } + + public CachePool setWeight(int weight) { + this.weight = weight; + return this; + } + + /** + * Get information about this cache pool. + * + * @param fullInfo + * If true, only the name will be returned (i.e., what you + * would get if you didn't have read permission for this pool.) + * @return + * Cache pool information. + */ + public CachePoolInfo getInfo(boolean fullInfo) { + CachePoolInfo info = new CachePoolInfo(poolName); + if (!fullInfo) { + return info; + } + return info.setOwnerName(ownerName). + setGroupName(groupName). + setMode(mode). + setWeight(weight); + } + + public CachePoolInfo getInfo(FSPermissionChecker pc) { + return getInfo(pc.checkReadPermission(ownerName, groupName, mode)); + } + + public String toString() { + return new StringBuilder(). + append("{ ").append("poolName:").append(poolName). + append(", ownerName:").append(ownerName). + append(", groupName:").append(groupName). + append(", mode:").append(String.format("%3o", mode)). + append(", weight:").append(weight). + append(" }").toString(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 9e817629d10..ca287ab7dcb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -155,6 +155,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; @@ -6700,6 +6701,7 @@ void deleteSnapshot(String snapshotRoot, String snapshotName) return; // Return previous response } boolean success = false; + checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -6748,17 +6750,198 @@ void removeSnapshottableDirs(List toRemove) { } List> addPathCacheDirectives( - List directives) { - return cacheManager.addDirectives(directives); + List directives) throws IOException { + CacheEntryWithPayload retryCacheEntry = + RetryCache.waitForCompletion(retryCache, null); + if (retryCacheEntry != null && retryCacheEntry.isSuccess()) { + return (List>) retryCacheEntry.getPayload(); + } + final FSPermissionChecker pc = getPermissionChecker(); + boolean success = false; + List> results = null; + checkOperation(OperationCategory.WRITE); + writeLock(); + try { + checkOperation(OperationCategory.WRITE); + if (isInSafeMode()) { + throw new SafeModeException( + "Cannot add path cache directive", safeMode); + } + results = 
cacheManager.addDirectives(directives, pc); + //getEditLog().logAddPathCacheDirectives(results); FIXME: HDFS-5119 + success = true; + } finally { + writeUnlock(); + if (success) { + getEditLog().logSync(); + } + if (isAuditEnabled() && isExternalInvocation()) { + logAuditEvent(success, "addPathCacheDirectives", null, null, null); + } + RetryCache.setState(retryCacheEntry, success, results); + } + return results; } - List> removePathCacheEntries(List ids) { - return cacheManager.removeEntries(ids); + List> removePathCacheEntries(List ids) throws IOException { + CacheEntryWithPayload retryCacheEntry = + RetryCache.waitForCompletion(retryCache, null); + if (retryCacheEntry != null && retryCacheEntry.isSuccess()) { + return (List>) retryCacheEntry.getPayload(); + } + final FSPermissionChecker pc = getPermissionChecker(); + boolean success = false; + List> results = null; + checkOperation(OperationCategory.WRITE); + writeLock(); + try { + checkOperation(OperationCategory.WRITE); + if (isInSafeMode()) { + throw new SafeModeException( + "Cannot remove path cache directives", safeMode); + } + results = cacheManager.removeEntries(ids, pc); + //getEditLog().logRemovePathCacheEntries(results); FIXME: HDFS-5119 + success = true; + } finally { + writeUnlock(); + if (isAuditEnabled() && isExternalInvocation()) { + logAuditEvent(success, "removePathCacheEntries", null, null, null); + } + RetryCache.setState(retryCacheEntry, success, results); + } + getEditLog().logSync(); + return results; } List listPathCacheEntries(long startId, String pool, - int maxReplies) { - return cacheManager.listPathCacheEntries(startId, pool, maxReplies); + int maxReplies) throws IOException { + checkOperation(OperationCategory.READ); + readLock(); + try { + checkOperation(OperationCategory.READ); + return cacheManager.listPathCacheEntries(startId, pool, maxReplies); + } finally { + readUnlock(); + } + } + + public void addCachePool(CachePoolInfo req) throws IOException { + final FSPermissionChecker pc = getPermissionChecker(); + CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); + if (cacheEntry != null && cacheEntry.isSuccess()) { + return; // Return previous response + } + checkOperation(OperationCategory.WRITE); + writeLock(); + boolean success = false; + try { + checkOperation(OperationCategory.WRITE); + if (!pc.isSuperUser()) { + throw new AccessControlException("Non-super users cannot " + + "add cache pools."); + } + if (isInSafeMode()) { + throw new SafeModeException( + "Cannot add cache pool " + req.getPoolName(), safeMode); + } + cacheManager.addCachePool(req); + //getEditLog().logAddCachePool(req); // FIXME: HDFS-5119 + success = true; + } finally { + writeUnlock(); + if (isAuditEnabled() && isExternalInvocation()) { + logAuditEvent(success, "addCachePool", req.getPoolName(), null, null); + } + RetryCache.setState(cacheEntry, success); + } + + getEditLog().logSync(); + } + + public void modifyCachePool(CachePoolInfo req) throws IOException { + final FSPermissionChecker pc = getPermissionChecker(); + CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); + if (cacheEntry != null && cacheEntry.isSuccess()) { + return; // Return previous response + } + checkOperation(OperationCategory.WRITE); + writeLock(); + boolean success = false; + try { + checkOperation(OperationCategory.WRITE); + if (!pc.isSuperUser()) { + throw new AccessControlException("Non-super users cannot " + + "modify cache pools."); + } + if (isInSafeMode()) { + throw new SafeModeException( + "Cannot modify cache pool " + 
req.getPoolName(), safeMode); + } + cacheManager.modifyCachePool(req); + //getEditLog().logModifyCachePool(req); // FIXME: HDFS-5119 + success = true; + } finally { + writeUnlock(); + if (isAuditEnabled() && isExternalInvocation()) { + logAuditEvent(success, "modifyCachePool", req.getPoolName(), null, null); + } + RetryCache.setState(cacheEntry, success); + } + + getEditLog().logSync(); + } + + public void removeCachePool(String cachePoolName) throws IOException { + final FSPermissionChecker pc = getPermissionChecker(); + CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); + if (cacheEntry != null && cacheEntry.isSuccess()) { + return; // Return previous response + } + checkOperation(OperationCategory.WRITE); + writeLock(); + boolean success = false; + try { + checkOperation(OperationCategory.WRITE); + if (!pc.isSuperUser()) { + throw new AccessControlException("Non-super users cannot " + + "remove cache pools."); + } + if (isInSafeMode()) { + throw new SafeModeException( + "Cannot remove cache pool " + cachePoolName, safeMode); + } + cacheManager.removeCachePool(cachePoolName); + //getEditLog().logRemoveCachePool(req); // FIXME: HDFS-5119 + success = true; + } finally { + writeUnlock(); + if (isAuditEnabled() && isExternalInvocation()) { + logAuditEvent(success, "removeCachePool", cachePoolName, null, null); + } + RetryCache.setState(cacheEntry, success); + } + + getEditLog().logSync(); + } + + public List listCachePools(String prevKey, + int maxRepliesPerRequest) throws IOException { + final FSPermissionChecker pc = getPermissionChecker(); + List results; + checkOperation(OperationCategory.READ); + readLock(); + try { + checkOperation(OperationCategory.READ); + results = cacheManager.listCachePools(pc, prevKey, maxRepliesPerRequest); + } finally { + readUnlock(); + } + return results; + } + + public CacheManager getCacheManager() { + return cacheManager; } /** @@ -6798,8 +6981,4 @@ public void logAuditEvent(boolean succeeded, String userName, } } } - - public CacheManager getCacheManager() { - return cacheManager; - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index a02bc4044de..c516a73e57f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -255,4 +255,40 @@ private void checkStickyBit(INode parent, INode inode, Snapshot snapshot throw new AccessControlException("Permission denied by sticky bit setting:" + " user=" + user + ", inode=" + inode); } + + /** + * Check if this CachePool can be accessed. + * + * @param pc + * Permission checker object with user name and groups. + * @param write + * True if we care about write access; false otherwise. + * @return + * True only if the cache pool is accessible. 
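The read and write checks used for cache pools boil down to standard POSIX-style mode arithmetic: the owner bits are the access mask shifted left by six, the group bits by three, and the unshifted bits apply to everyone else, with write using mask 02 and read using mask 04. The following is a hedged, stand-alone illustration of that arithmetic, not the FSPermissionChecker code itself.

    /** Stand-alone illustration of rwx mode arithmetic for pool modes. */
    class ModeBitsSketch {
      // With mode 0755 and the write mask 02:
      //   owner: 0755 & (02 << 6) = 0200 -> non-zero, the owner may write
      //   group: 0755 & (02 << 3) = 0    -> the group may not write
      //   other: 0755 & 02        = 0    -> everyone else may not write
      static boolean allowed(int mode, int mask, boolean isOwner, boolean inGroup) {
        if ((mode & mask) != 0) {
          return true;              // the "other" bits grant access to everyone
        }
        if (isOwner && (mode & (mask << 6)) != 0) {
          return true;
        }
        return inGroup && (mode & (mask << 3)) != 0;
      }

      public static void main(String[] args) {
        System.out.println(allowed(0755, 02, true, false));   // true: owner write
        System.out.println(allowed(0750, 04, false, true));   // true: group read
        System.out.println(allowed(0640, 04, false, false));  // false: caller is neither owner nor in group
      }
    }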
+ */ + private boolean checkPermission(String userName, + String groupName, int mode, int mask) { + if ((mode & mask) != 0) { + return true; + } + if (((mode & (mask << 6)) != 0) + && (getUser().equals(userName))) { + return true; + } + if (((mode & (mask << 6)) != 0) + && (containsGroup(groupName))) { + return true; + } + return false; + } + + public boolean checkWritePermission(String userName, + String groupName, int mode) { + return checkPermission(userName, groupName, mode, 02); + } + + public boolean checkReadPermission(String userName, + String groupName, int mode) { + return checkPermission(userName, groupName, mode, 04); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index b96df2a6aa0..9eb09bb43af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -36,6 +36,8 @@ import org.apache.commons.logging.Log; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.BatchedRemoteIterator; +import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.CreateFlag; @@ -62,6 +64,7 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.PathCacheDirective; import org.apache.hadoop.hdfs.protocol.PathCacheEntry; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -1219,68 +1222,75 @@ public List> removePathCacheEntries(List ids) return namesystem.removePathCacheEntries(ids); } - private class PathCacheEntriesIterator - implements RemoteIterator { - private long prevId; + private class ServerSidePathCacheEntriesIterator + extends BatchedRemoteIterator { + private final String pool; - private final int repliesPerRequest; - private List entries; - private int idx; - public PathCacheEntriesIterator(long prevId, String pool, - int repliesPerRequest) { - this.prevId = prevId; + public ServerSidePathCacheEntriesIterator(Long firstKey, + int maxRepliesPerRequest, String pool) { + super(firstKey, maxRepliesPerRequest); this.pool = pool; - this.repliesPerRequest = repliesPerRequest; - this.entries = null; - this.idx = -1; - } - - private void makeRequest() throws IOException { - idx = 0; - entries = null; - entries = namesystem.listPathCacheEntries(prevId, pool, - repliesPerRequest); - if (entries.isEmpty()) { - entries = null; - } - } - - private void makeRequestIfNeeded() throws IOException { - if (idx == -1) { - makeRequest(); - } else if ((entries != null) && (idx >= entries.size())) { - if (entries.size() < repliesPerRequest) { - // Last time, we got fewer entries than requested. - // So we should be at the end. 
- entries = null; - } else { - makeRequest(); - } - } } @Override - public boolean hasNext() throws IOException { - makeRequestIfNeeded(); - return (entries != null); + public BatchedEntries makeRequest( + Long nextKey, int maxRepliesPerRequest) throws IOException { + return new BatchedListEntries( + namesystem.listPathCacheEntries(nextKey, pool, + maxRepliesPerRequest)); } @Override - public PathCacheEntry next() throws IOException { - makeRequestIfNeeded(); - if (entries == null) { - throw new NoSuchElementException(); - } - PathCacheEntry entry = entries.get(idx++); - prevId = entry.getEntryId(); - return entry; + public Long elementToNextKey(PathCacheEntry entry) { + return entry.getEntryId(); } } @Override public RemoteIterator listPathCacheEntries(long prevId, String pool, int maxReplies) throws IOException { - return new PathCacheEntriesIterator(prevId, pool, maxReplies); + return new ServerSidePathCacheEntriesIterator(prevId, maxReplies, pool); + } + + @Override + public void addCachePool(CachePoolInfo info) throws IOException { + namesystem.addCachePool(info); + } + + @Override + public void modifyCachePool(CachePoolInfo info) throws IOException { + namesystem.modifyCachePool(info); + } + + @Override + public void removeCachePool(String cachePoolName) throws IOException { + namesystem.removeCachePool(cachePoolName); + } + + private class ServerSideCachePoolIterator + extends BatchedRemoteIterator { + + public ServerSideCachePoolIterator(String prevKey, int maxRepliesPerRequest) { + super(prevKey, maxRepliesPerRequest); + } + + @Override + public BatchedEntries makeRequest(String prevKey, + int maxRepliesPerRequest) throws IOException { + return new BatchedListEntries( + namesystem.listCachePools(prevKey, maxRepliesPerRequest)); + } + + @Override + public String elementToNextKey(CachePoolInfo element) { + return element.getPoolName(); + } + } + + @Override + public RemoteIterator listCachePools(String prevKey, + int maxRepliesPerRequest) throws IOException { + return new ServerSideCachePoolIterator(prevKey, maxRepliesPerRequest); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index 9d1bfd5a354..e799ebf413a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -394,7 +394,8 @@ message RemovePathCacheEntriesResponseProto { enum RemovePathCacheEntryErrorProto { INVALID_CACHED_PATH_ID_ERROR = -1; NO_SUCH_CACHED_PATH_ID_ERROR = -2; - UNEXPECTED_REMOVE_ERROR = -3; + REMOVE_PERMISSION_DENIED_ERROR = -3; + UNEXPECTED_REMOVE_ERROR = -4; } message ListPathCacheEntriesRequestProto { @@ -414,6 +415,53 @@ message ListPathCacheEntriesResponseProto { required bool hasMore = 2; } +message AddCachePoolRequestProto { + required string poolName = 1; + optional string ownerName = 2; + optional string groupName = 3; + optional int32 mode = 4; + optional int32 weight = 5; +} + +message AddCachePoolResponseProto { // void response +} + +message ModifyCachePoolRequestProto { + required string poolName = 1; + optional string ownerName = 2; + optional string groupName = 3; + optional int32 mode = 4; + optional int32 weight = 5; +} + +message ModifyCachePoolResponseProto { // void response +} + +message RemoveCachePoolRequestProto { + required string poolName = 1; +} + +message RemoveCachePoolResponseProto { // void response +} + +message ListCachePoolsRequestProto { + 
required string prevPoolName = 1; + required int32 maxReplies = 2; +} + +message ListCachePoolsResponseProto { + repeated ListCachePoolsResponseElementProto elements = 1; + optional bool hasMore = 2; +} + +message ListCachePoolsResponseElementProto { + required string poolName = 1; + required string ownerName = 2; + required string groupName = 3; + required int32 mode = 4; + required int32 weight = 5; +} + message GetFileLinkInfoRequestProto { required string src = 1; } @@ -601,6 +649,14 @@ service ClientNamenodeProtocol { returns (RemovePathCacheEntriesResponseProto); rpc listPathCacheEntries(ListPathCacheEntriesRequestProto) returns (ListPathCacheEntriesResponseProto); + rpc addCachePool(AddCachePoolRequestProto) + returns(AddCachePoolResponseProto); + rpc modifyCachePool(ModifyCachePoolRequestProto) + returns(ModifyCachePoolResponseProto); + rpc removeCachePool(RemoveCachePoolRequestProto) + returns(RemoveCachePoolResponseProto); + rpc listCachePools(ListCachePoolsRequestProto) + returns(ListCachePoolsResponseProto); rpc getFileLinkInfo(GetFileLinkInfoRequestProto) returns(GetFileLinkInfoResponseProto); rpc getContentSummary(GetContentSummaryRequestProto) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathCacheRequests.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathCacheRequests.java index fe7ae38d7b5..66aba064537 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathCacheRequests.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathCacheRequests.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.junit.Assert.*; + import java.io.IOException; import java.util.Arrays; import java.util.List; @@ -32,17 +34,89 @@ import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.PoolWritePermissionDeniedError; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; import org.apache.hadoop.hdfs.protocol.PathCacheDirective; import org.apache.hadoop.hdfs.protocol.PathCacheEntry; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Fallible; import org.junit.Test; public class TestPathCacheRequests { static final Log LOG = LogFactory.getLog(TestPathCacheRequests.class); + @Test + public void testCreateAndRemovePools() throws Exception { + Configuration conf = new HdfsConfiguration(); + MiniDFSCluster cluster = null; + + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + NamenodeProtocols proto = cluster.getNameNodeRpc(); + CachePoolInfo req = new CachePoolInfo("pool1"). + setOwnerName("bob").setGroupName("bobgroup"). 
+ setMode(0755).setWeight(150); + proto.addCachePool(req); + try { + proto.removeCachePool("pool99"); + Assert.fail("expected to get an exception when " + + "removing a non-existent pool."); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("can't remove " + + "nonexistent cache pool", ioe); + } + proto.removeCachePool("pool1"); + try { + proto.removeCachePool("pool1"); + Assert.fail("expected to get an exception when " + + "removing a non-existent pool."); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("can't remove " + + "nonexistent cache pool", ioe); + } + req = new CachePoolInfo("pool2"); + proto.addCachePool(req); + } + + @Test + public void testCreateAndModifyPools() throws Exception { + Configuration conf = new HdfsConfiguration(); + MiniDFSCluster cluster = null; + + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + NamenodeProtocols proto = cluster.getNameNodeRpc(); + proto.addCachePool(new CachePoolInfo("pool1"). + setOwnerName("abc").setGroupName("123"). + setMode(0755).setWeight(150)); + proto.modifyCachePool(new CachePoolInfo("pool1"). + setOwnerName("def").setGroupName("456")); + RemoteIterator iter = proto.listCachePools("", 1); + CachePoolInfo info = iter.next(); + assertEquals("pool1", info.getPoolName()); + assertEquals("def", info.getOwnerName()); + assertEquals("456", info.getGroupName()); + assertEquals(Integer.valueOf(0755), info.getMode()); + assertEquals(Integer.valueOf(150), info.getWeight()); + + try { + proto.removeCachePool("pool99"); + Assert.fail("expected to get an exception when " + + "removing a non-existent pool."); + } catch (IOException ioe) { + } + proto.removeCachePool("pool1"); + try { + proto.removeCachePool("pool1"); + Assert.fail("expected to get an exception when " + + "removing a non-existent pool."); + } catch (IOException ioe) { + } + } + private static void validateListAll( RemoteIterator iter, long id0, long id1, long id2) throws Exception { @@ -67,12 +141,18 @@ public void testSetAndGet() throws Exception { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); NamenodeProtocols proto = cluster.getNameNodeRpc(); + proto.addCachePool(new CachePoolInfo("pool1")); + proto.addCachePool(new CachePoolInfo("pool2")); + proto.addCachePool(new CachePoolInfo("pool3")); + proto.addCachePool(new CachePoolInfo("pool4").setMode(0)); List> addResults1 = proto.addPathCacheDirectives(Arrays.asList( new PathCacheDirective[] { new PathCacheDirective("/alpha", "pool1"), new PathCacheDirective("/beta", "pool2"), - new PathCacheDirective("", "pool3") + new PathCacheDirective("", "pool3"), + new PathCacheDirective("/zeta", "nonexistent_pool"), + new PathCacheDirective("/zeta", "pool4") })); long ids1[] = new long[2]; ids1[0] = addResults1.get(0).get().getEntryId(); @@ -83,6 +163,20 @@ public void testSetAndGet() throws Exception { } catch (IOException ioe) { Assert.assertTrue(ioe.getCause() instanceof EmptyPathError); } + try { + addResults1.get(3).get(); + Assert.fail("expected an error when adding to a nonexistent pool."); + } catch (IOException ioe) { + Assert.assertTrue(ioe.getCause() instanceof InvalidPoolNameError); + } + try { + addResults1.get(4).get(); + Assert.fail("expected an error when adding to a pool with " + + "mode 0 (no permissions for anyone)."); + } catch (IOException ioe) { + Assert.assertTrue(ioe.getCause() + instanceof PoolWritePermissionDeniedError); + } List> addResults2 = 
proto.addPathCacheDirectives(Arrays.asList( From d56d0b46e1b82ae068083ddb99872d314684dc82 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Wed, 4 Sep 2013 18:23:51 +0000 Subject: [PATCH 11/51] commit correct version of HDFS-5121 git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1520090 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop/fs/BatchedRemoteIterator.java | 19 +- .../AddPathCacheDirectiveException.java | 12 +- .../hadoop/hdfs/protocol/CachePoolInfo.java | 144 ++++++++++--- .../hadoop/hdfs/protocol/ClientProtocol.java | 131 +++++------- .../hdfs/protocol/PathCacheDirective.java | 25 ++- ...amenodeProtocolServerSideTranslatorPB.java | 95 +++------ .../ClientNamenodeProtocolTranslatorPB.java | 155 ++++++-------- .../hadoop/hdfs/protocolPB/PBHelper.java | 78 +++++++ .../hdfs/server/namenode/CacheManager.java | 200 +++++++++++------- .../hdfs/server/namenode/CachePool.java | 168 +++++++-------- .../hdfs/server/namenode/FSNamesystem.java | 124 +++++------ .../server/namenode/FSPermissionChecker.java | 41 ++-- .../server/namenode/NameNodeRpcServer.java | 57 +++-- .../main/proto/ClientNamenodeProtocol.proto | 58 ++--- .../server/datanode/TestFsDatasetCache.java | 19 +- .../namenode/TestPathCacheRequests.java | 186 +++++++++------- 16 files changed, 828 insertions(+), 684 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchedRemoteIterator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchedRemoteIterator.java index 4c682c6b18b..42100d83e09 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchedRemoteIterator.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchedRemoteIterator.java @@ -29,7 +29,7 @@ public interface BatchedEntries { public E get(int i); public int size(); } - + public static class BatchedListEntries implements BatchedEntries { private final List entries; @@ -39,7 +39,6 @@ public BatchedListEntries(List entries) { public E get(int i) { return entries.get(i); - } public int size() { @@ -47,13 +46,13 @@ public int size() { } } - private K nextKey; + private K prevKey; private final int maxRepliesPerRequest; private BatchedEntries entries; private int idx; - public BatchedRemoteIterator(K nextKey, int maxRepliesPerRequest) { - this.nextKey = nextKey; + public BatchedRemoteIterator(K prevKey, int maxRepliesPerRequest) { + this.prevKey = prevKey; this.maxRepliesPerRequest = maxRepliesPerRequest; this.entries = null; this.idx = -1; @@ -66,13 +65,13 @@ public BatchedRemoteIterator(K nextKey, int maxRepliesPerRequest) { * @param maxRepliesPerRequest The maximum number of replies to allow. * @return A list of replies. */ - public abstract BatchedEntries makeRequest(K nextKey, int maxRepliesPerRequest) - throws IOException; + public abstract BatchedEntries makeRequest(K prevKey, + int maxRepliesPerRequest) throws IOException; private void makeRequest() throws IOException { idx = 0; entries = null; - entries = makeRequest(nextKey, maxRepliesPerRequest); + entries = makeRequest(prevKey, maxRepliesPerRequest); if (entries.size() > maxRepliesPerRequest) { throw new IOException("invalid number of replies returned: got " + entries.size() + ", expected " + maxRepliesPerRequest + @@ -106,7 +105,7 @@ public boolean hasNext() throws IOException { /** * Return the next list key associated with an element. 
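As a concrete illustration of the BatchedRemoteIterator contract being refactored here (the constructor seeds the previous key, makeRequest fetches one batch, and elementToPrevKey tells the iterator where the next request should resume), the sketch below pages through an in-memory TreeMap instead of an RPC. The class name, the map, and the batch size are invented for the example; only the overridden signatures come from this patch.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.fs.BatchedRemoteIterator;

/** Toy subclass: batched iteration over an in-memory map, no RPC involved. */
class InMemoryBatchedIterator
    extends BatchedRemoteIterator<Long, Map.Entry<Long, String>> {

  private final TreeMap<Long, String> data;

  InMemoryBatchedIterator(TreeMap<Long, String> data, int maxRepliesPerRequest) {
    super(-1L, maxRepliesPerRequest);   // -1 means "start before the first key"
    this.data = data;
  }

  @Override
  public BatchedEntries<Map.Entry<Long, String>> makeRequest(
      Long prevKey, int maxRepliesPerRequest) throws IOException {
    // Return up to maxRepliesPerRequest entries with keys strictly after prevKey.
    List<Map.Entry<Long, String>> batch =
        new ArrayList<Map.Entry<Long, String>>(maxRepliesPerRequest);
    for (Map.Entry<Long, String> e : data.tailMap(prevKey + 1).entrySet()) {
      if (batch.size() >= maxRepliesPerRequest) {
        break;
      }
      batch.add(e);
    }
    return new BatchedListEntries<Map.Entry<Long, String>>(batch);
  }

  @Override
  public Long elementToPrevKey(Map.Entry<Long, String> element) {
    return element.getKey();            // resume after the last returned key
  }
}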
*/ - public abstract K elementToNextKey(E element); + public abstract K elementToPrevKey(E element); @Override public E next() throws IOException { @@ -115,7 +114,7 @@ public E next() throws IOException { throw new NoSuchElementException(); } E entry = entries.get(idx++); - nextKey = elementToNextKey(entry); + prevKey = elementToPrevKey(entry); return entry; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathCacheDirectiveException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathCacheDirectiveException.java index e162463d8d4..0972302cd26 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathCacheDirectiveException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathCacheDirectiveException.java @@ -56,12 +56,12 @@ public InvalidPathNameError(PathCacheDirective directive) { } } - public static class InvalidPoolNameError + public static class InvalidPoolError extends AddPathCacheDirectiveException { private static final long serialVersionUID = 1L; - public InvalidPoolNameError(PathCacheDirective directive) { - super("invalid pool name '" + directive.getPool() + "'", directive); + public InvalidPoolError(PathCacheDirective directive) { + super("invalid pool id " + directive.getPoolId(), directive); } } @@ -70,7 +70,7 @@ public static class PoolWritePermissionDeniedError private static final long serialVersionUID = 1L; public PoolWritePermissionDeniedError(PathCacheDirective directive) { - super("write permission denied for pool '" + directive.getPool() + "'", + super("write permission denied for pool id " + directive.getPoolId(), directive); } } @@ -82,7 +82,9 @@ public static class UnexpectedAddPathCacheDirectiveException public UnexpectedAddPathCacheDirectiveException( PathCacheDirective directive) { super("encountered an unexpected error when trying to " + - "add path cache directive " + directive, directive); + "add path cache directive to pool id " + directive.getPoolId() + + " " + directive, + directive); } } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java index 20006059a7a..cf05816c7f6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java @@ -18,35 +18,45 @@ package org.apache.hadoop.hdfs.protocol; -import javax.annotation.Nullable; - +import org.apache.commons.lang.builder.EqualsBuilder; +import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.permission.FsPermission; + +import com.google.common.base.Preconditions; /** * Information about a cache pool. + * + * CachePoolInfo permissions roughly map to Unix file permissions. + * Write permissions allow addition and removal of a {@link PathCacheEntry} from + * the pool. Execute permissions allow listing of PathCacheEntries in a pool. + * Read permissions have no associated meaning. 
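A minimal sketch of how a pool description would be assembled with the Builder this patch introduces; the pool name, owner, group, 0755 mode, and weight below are example values, not defaults from the patch. With mode 0755 the owner can add and remove entries (write) and list them (execute), while group and others can only list.

import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

class CachePoolInfoBuilderExample {
  static CachePoolInfo examplePool() {
    return CachePoolInfo.newBuilder()
        .setPoolName("research")                  // required; build() checks it
        .setOwnerName("alice")
        .setGroupName("analysts")
        .setMode(new FsPermission((short) 0755))  // rwxr-xr-x
        .setWeight(100)
        .build();
  }

  static CachePoolInfo reweighted(CachePoolInfo existing) {
    // Copy-and-modify form, mirroring how modifyCachePool rebuilds the info.
    return CachePoolInfo.newBuilder(existing).setWeight(200).build();
  }
}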
*/ @InterfaceAudience.Private @InterfaceStability.Evolving public class CachePoolInfo { - final String poolName; - @Nullable - String ownerName; + private String poolName; + private String ownerName; + private String groupName; + private FsPermission mode; + private Integer weight; - @Nullable - String groupName; - - @Nullable - Integer mode; - - @Nullable - Integer weight; + /** + * For Builder use + */ + private CachePoolInfo() {} + /** + * Use a CachePoolInfo {@link Builder} to create a new CachePoolInfo with + * more parameters + */ public CachePoolInfo(String poolName) { this.poolName = poolName; } - + public String getPoolName() { return poolName; } @@ -55,35 +65,103 @@ public String getOwnerName() { return ownerName; } - public CachePoolInfo setOwnerName(String ownerName) { - this.ownerName = ownerName; - return this; - } - public String getGroupName() { return groupName; } - public CachePoolInfo setGroupName(String groupName) { - this.groupName = groupName; - return this; - } - - public Integer getMode() { + public FsPermission getMode() { return mode; } - public CachePoolInfo setMode(Integer mode) { - this.mode = mode; - return this; - } - public Integer getWeight() { return weight; } - public CachePoolInfo setWeight(Integer weight) { - this.weight = weight; - return this; + public String toString() { + return new StringBuilder(). + append("{ ").append("poolName:").append(poolName). + append(", ownerName:").append(ownerName). + append(", groupName:").append(groupName). + append(", mode:").append(mode). + append(", weight:").append(weight). + append(" }").toString(); } + + @Override + public int hashCode() { + return new HashCodeBuilder().append(poolName).append(ownerName) + .append(groupName).append(mode.toShort()).append(weight).hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { return false; } + if (obj == this) { return true; } + if (obj.getClass() != getClass()) { + return false; + } + CachePoolInfo rhs = (CachePoolInfo)obj; + return new EqualsBuilder() + .append(poolName, rhs.poolName) + .append(ownerName, rhs.ownerName) + .append(groupName, rhs.groupName) + .append(mode, rhs.mode) + .append(weight, rhs.weight) + .isEquals(); + } + + public static Builder newBuilder() { + return new Builder(); + } + + public static Builder newBuilder(CachePoolInfo info) { + return new Builder(info); + } + + /** + * CachePoolInfo Builder + */ + public static class Builder { + private CachePoolInfo info; + + public Builder() { + this.info = new CachePoolInfo(); + } + + public Builder(CachePoolInfo info) { + this.info = info; + } + + public CachePoolInfo build() { + Preconditions.checkNotNull(info.poolName, + "Cannot create a CachePoolInfo without a pool name"); + return info; + } + + public Builder setPoolName(String poolName) { + info.poolName = poolName; + return this; + } + + public Builder setOwnerName(String ownerName) { + info.ownerName = ownerName; + return this; + } + + public Builder setGroupName(String groupName) { + info.groupName = groupName; + return this; + } + + public Builder setMode(FsPermission mode) { + info.mode = mode; + return this; + } + + public Builder setWeight(Integer weight) { + info.weight = weight; + return this; + } + } + } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index f07c950d215..df63b70256d 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector; +import org.apache.hadoop.hdfs.server.namenode.CachePool; import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; import org.apache.hadoop.hdfs.server.namenode.SafeModeException; import org.apache.hadoop.io.EnumSetWritable; @@ -1099,98 +1100,82 @@ public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, /** * Add some path cache directives to the CacheManager. - * - * @param directives - * A list of all the path cache directives we want to add. - * @return - * An list where each element is either a path cache entry that was - * added, or an IOException exception describing why the directive - * could not be added. + * + * @param directives A list of path cache directives to be added. + * @return A Fallible list, where each element is either a successfully addded + * path cache entry, or an IOException describing why the directive + * could not be added. */ @AtMostOnce - public List> - addPathCacheDirectives(List directives) - throws IOException; + public List> addPathCacheDirectives( + List directives) throws IOException; /** * Remove some path cache entries from the CacheManager. - * - * @param ids - * A list of all the IDs we want to remove from the CacheManager. - * @return - * An list where each element is either an ID that was removed, - * or an IOException exception describing why the ID could not be - * removed. + * + * @param ids A list of all the entry IDs to be removed from the CacheManager. + * @return A Fallible list where each element is either a successfully removed + * ID, or an IOException describing why the ID could not be removed. */ - @AtMostOnce + @Idempotent public List> removePathCacheEntries(List ids) throws IOException; /** - * List cached paths on the server. - * - * @param prevId - * The previous ID that we listed, or 0 if this is the first call - * to listPathCacheEntries. - * @param pool - * The pool ID to list. If this is the empty string, all pool ids - * will be listed. - * @param maxRepliesPerRequest - * The maximum number of replies to make in each request. - * @return - * A RemoteIterator from which you can get PathCacheEntry objects. - * Requests will be made as needed. + * List the set of cached paths of a cache pool. Incrementally fetches results + * from the server. + * + * @param prevId The last listed entry ID, or -1 if this is the first call to + * listPathCacheEntries. + * @param pool The cache pool to list, or -1 to list all pools + * @param maxRepliesPerRequest The maximum number of entries to return per + * request + * @return A RemoteIterator which returns PathCacheEntry objects. */ @Idempotent public RemoteIterator listPathCacheEntries(long prevId, - String pool, int maxRepliesPerRequest) throws IOException; - - /** - * Modify a cache pool. - * - * @param req - * The request to modify a cache pool. - * @throws IOException - * If the request could not be completed. - */ - @AtMostOnce - public void addCachePool(CachePoolInfo info) throws IOException; + long poolId, int maxRepliesPerRequest) throws IOException; /** - * Modify a cache pool. 
- * - * @param req - * The request to modify a cache pool. - * @throws IOException - * If the request could not be completed. + * Add a new cache pool. + * + * @param info Description of the new cache pool + * @throws IOException If the request could not be completed. */ - @Idempotent - public void modifyCachePool(CachePoolInfo req) throws IOException; - + @AtMostOnce + public CachePool addCachePool(CachePoolInfo info) throws IOException; + + /** + * Modify a cache pool, e.g. pool name, permissions, owner, group. + * + * @param poolId ID of the cache pool to modify + * @param info New metadata for the cache pool + * @throws IOException If the request could not be completed. + */ + @AtMostOnce + public void modifyCachePool(long poolId, CachePoolInfo info) + throws IOException; + /** * Remove a cache pool. - * - * @param cachePoolName - * Name of the cache pool to remove. - * @throws IOException - * if the cache pool did not exist, or could not be removed. - */ - @AtMostOnce - public void removeCachePool(String cachePoolName) throws IOException; - - /** - * List some cache pools. - * - * @param prevKey - * The previous key we listed. We will list keys greater than this. - * @param maxRepliesPerRequest - * Maximum number of cache pools to list. - * @return A remote iterator from which you can get CachePool objects. - * Requests will be made as needed. - * @throws IOException - * If there was an error listing cache pools. + * + * @param poolId ID of the cache pool to remove. + * @throws IOException if the cache pool did not exist, or could not be + * removed. */ @Idempotent - public RemoteIterator listCachePools(String prevKey, + public void removeCachePool(long poolId) throws IOException; + + /** + * List the set of cache pools. Incrementally fetches results from the server. + * + * @param prevPoolId ID of the last pool listed, or -1 if this is the first + * invocation of listCachePools + * @param maxRepliesPerRequest Maximum number of cache pools to return per + * server request. + * @return A RemoteIterator which returns CachePool objects. 
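Since the cache-pool methods now key on a numeric pool ID instead of the pool name, a hedged client-side sketch of the full lifecycle may help. The namenode proxy, the pool name, the new owner, and the batch size of 16 are assumptions of the example; the -1 starting key and the method signatures follow the javadoc in this patch.

import java.io.IOException;

import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.server.namenode.CachePool;

class CachePoolLifecycleExample {
  static void run(ClientProtocol namenode) throws IOException {
    // Create a pool; the server assigns and returns the pool ID.
    CachePool pool = namenode.addCachePool(
        CachePoolInfo.newBuilder().setPoolName("pool1").build());

    // Subsequent calls address the pool by that ID, not by name.
    namenode.modifyCachePool(pool.getId(),
        CachePoolInfo.newBuilder(pool.getInfo()).setOwnerName("bob").build());

    // -1 asks for the listing from the beginning; 16 replies per RPC.
    RemoteIterator<CachePool> it = namenode.listCachePools(-1, 16);
    while (it.hasNext()) {
      System.out.println(it.next().getInfo());
    }

    namenode.removeCachePool(pool.getId());
  }
}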
+ */ + @Idempotent + public RemoteIterator listCachePools(long prevPoolId, int maxRepliesPerRequest) throws IOException; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheDirective.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheDirective.java index 8c6d742d4cd..cab8dc45f28 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheDirective.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheDirective.java @@ -25,7 +25,7 @@ import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError; /** @@ -33,14 +33,13 @@ */ public class PathCacheDirective implements Comparable { private final String path; + private final long poolId; - private final String pool; - - public PathCacheDirective(String path, String pool) { + public PathCacheDirective(String path, long poolId) { Preconditions.checkNotNull(path); - Preconditions.checkNotNull(pool); + Preconditions.checkArgument(poolId > 0); this.path = path; - this.pool = pool; + this.poolId = poolId; } /** @@ -53,8 +52,8 @@ public String getPath() { /** * @return The pool used in this request. */ - public String getPool() { - return pool; + public long getPoolId() { + return poolId; } /** @@ -70,22 +69,22 @@ public void validate() throws IOException { if (!DFSUtil.isValidName(path)) { throw new InvalidPathNameError(this); } - if (pool.isEmpty()) { - throw new InvalidPoolNameError(this); + if (poolId <= 0) { + throw new InvalidPoolError(this); } } @Override public int compareTo(PathCacheDirective rhs) { return ComparisonChain.start(). - compare(pool, rhs.getPool()). + compare(poolId, rhs.getPoolId()). compare(path, rhs.getPath()). result(); } @Override public int hashCode() { - return new HashCodeBuilder().append(path).append(pool).hashCode(); + return new HashCodeBuilder().append(path).append(poolId).hashCode(); } @Override @@ -102,7 +101,7 @@ public boolean equals(Object o) { public String toString() { StringBuilder builder = new StringBuilder(); builder.append("{ path:").append(path). - append(", pool:").append(pool). + append(", poolId:").append(poolId). 
append(" }"); return builder.toString(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index f9a5bfbc914..d31162497bf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -29,8 +29,7 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError; -import org.apache.hadoop.hdfs.protocol.CachePoolInfo; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DirectoryListing; @@ -113,7 +112,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesElementProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto; @@ -173,7 +171,6 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.namenode.CachePool; import org.apache.hadoop.hdfs.server.namenode.INodeId; -import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto; @@ -1038,16 +1035,19 @@ public IsFileClosedResponseProto isFileClosed( } @Override - public AddPathCacheDirectivesResponseProto addPathCacheDirectives(RpcController controller, - AddPathCacheDirectivesRequestProto request) throws ServiceException { + public AddPathCacheDirectivesResponseProto addPathCacheDirectives( + RpcController controller, AddPathCacheDirectivesRequestProto request) + throws ServiceException { try { ArrayList input = new ArrayList(request.getElementsCount()); for (int i = 0; i < request.getElementsCount(); i++) { PathCacheDirectiveProto proto = request.getElements(i); - input.add(new PathCacheDirective(proto.getPath(), proto.getPool())); + input.add(new PathCacheDirective(proto.getPath(), + proto.getPool().getId())); } - List> output = server.addPathCacheDirectives(input); + List> output = server + .addPathCacheDirectives(input); AddPathCacheDirectivesResponseProto.Builder builder = AddPathCacheDirectivesResponseProto.newBuilder(); for (int idx = 0; idx < output.size(); 
idx++) { @@ -1060,7 +1060,7 @@ public AddPathCacheDirectivesResponseProto addPathCacheDirectives(RpcController } catch (InvalidPathNameError ioe) { builder.addResults(AddPathCacheDirectiveErrorProto. INVALID_PATH_NAME_ERROR_VALUE); - } catch (InvalidPoolNameError ioe) { + } catch (InvalidPoolError ioe) { builder.addResults(AddPathCacheDirectiveErrorProto. INVALID_POOL_NAME_ERROR_VALUE); } catch (IOException ioe) { @@ -1108,22 +1108,21 @@ public RemovePathCacheEntriesResponseProto removePathCacheEntries( } @Override - public ListPathCacheEntriesResponseProto listPathCacheEntries(RpcController controller, - ListPathCacheEntriesRequestProto request) throws ServiceException { + public ListPathCacheEntriesResponseProto listPathCacheEntries( + RpcController controller, ListPathCacheEntriesRequestProto request) + throws ServiceException { try { + CachePool pool = PBHelper.convert(request.getPool()); RemoteIterator iter = - server.listPathCacheEntries(request.getPrevId(), - request.getPool(), + server.listPathCacheEntries( + PBHelper.convert(request.getPrevEntry()).getEntryId(), + pool.getId(), request.getMaxReplies()); ListPathCacheEntriesResponseProto.Builder builder = ListPathCacheEntriesResponseProto.newBuilder(); while (iter.hasNext()) { PathCacheEntry entry = iter.next(); - builder.addElements( - ListPathCacheEntriesElementProto.newBuilder(). - setId(entry.getEntryId()). - setPath(entry.getDirective().getPath()). - setPool(entry.getDirective().getPool())); + builder.addEntries(PBHelper.convert(entry)); } return builder.build(); } catch (IOException e) { @@ -1135,46 +1134,20 @@ public ListPathCacheEntriesResponseProto listPathCacheEntries(RpcController cont public AddCachePoolResponseProto addCachePool(RpcController controller, AddCachePoolRequestProto request) throws ServiceException { try { - CachePoolInfo info = - new CachePoolInfo(request.getPoolName()); - if (request.hasOwnerName()) { - info.setOwnerName(request.getOwnerName()); - } - if (request.hasGroupName()) { - info.setGroupName(request.getGroupName()); - } - if (request.hasMode()) { - info.setMode(request.getMode()); - } - if (request.hasWeight()) { - info.setWeight(request.getWeight()); - } - server.addCachePool(info); + server.addCachePool(PBHelper.convert(request.getInfo())); return AddCachePoolResponseProto.newBuilder().build(); } catch (IOException e) { throw new ServiceException(e); } } - + @Override public ModifyCachePoolResponseProto modifyCachePool(RpcController controller, ModifyCachePoolRequestProto request) throws ServiceException { try { - CachePoolInfo info = - new CachePoolInfo(request.getPoolName()); - if (request.hasOwnerName()) { - info.setOwnerName(request.getOwnerName()); - } - if (request.hasGroupName()) { - info.setGroupName(request.getGroupName()); - } - if (request.hasMode()) { - info.setMode(request.getMode()); - } - if (request.hasWeight()) { - info.setWeight(request.getWeight()); - } - server.modifyCachePool(info); + server.modifyCachePool( + PBHelper.convert(request.getPool()).getId(), + PBHelper.convert(request.getInfo())); return ModifyCachePoolResponseProto.newBuilder().build(); } catch (IOException e) { throw new ServiceException(e); @@ -1185,7 +1158,7 @@ public ModifyCachePoolResponseProto modifyCachePool(RpcController controller, public RemoveCachePoolResponseProto removeCachePool(RpcController controller, RemoveCachePoolRequestProto request) throws ServiceException { try { - server.removeCachePool(request.getPoolName()); + server.removeCachePool(PBHelper.convert(request.getPool()).getId()); return 
RemoveCachePoolResponseProto.newBuilder().build(); } catch (IOException e) { throw new ServiceException(e); @@ -1196,28 +1169,16 @@ public RemoveCachePoolResponseProto removeCachePool(RpcController controller, public ListCachePoolsResponseProto listCachePools(RpcController controller, ListCachePoolsRequestProto request) throws ServiceException { try { - RemoteIterator iter = - server.listCachePools(request.getPrevPoolName(), + RemoteIterator iter = + server.listCachePools(PBHelper.convert(request.getPrevPool()).getId(), request.getMaxReplies()); ListCachePoolsResponseProto.Builder responseBuilder = ListCachePoolsResponseProto.newBuilder(); while (iter.hasNext()) { - CachePoolInfo pool = iter.next(); - ListCachePoolsResponseElementProto.Builder elemBuilder = + CachePool pool = iter.next(); + ListCachePoolsResponseElementProto.Builder elemBuilder = ListCachePoolsResponseElementProto.newBuilder(); - elemBuilder.setPoolName(pool.getPoolName()); - if (pool.getOwnerName() != null) { - elemBuilder.setOwnerName(pool.getOwnerName()); - } - if (pool.getGroupName() != null) { - elemBuilder.setGroupName(pool.getGroupName()); - } - if (pool.getMode() != null) { - elemBuilder.setMode(pool.getMode()); - } - if (pool.getWeight() != null) { - elemBuilder.setWeight(pool.getWeight()); - } + elemBuilder.setPool(PBHelper.convert(pool)); responseBuilder.addElements(elemBuilder.build()); } return responseBuilder.build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 4b8687e1d99..9005cc28e98 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -23,7 +23,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.NoSuchElementException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -38,17 +37,12 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; -import org.apache.hadoop.hdfs.protocol.PathCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathCacheEntry; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.UnexpectedAddPathCacheDirectiveException; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException; +import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; +import 
org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; @@ -61,14 +55,18 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; +import org.apache.hadoop.hdfs.protocol.PathCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathCacheEntry; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectiveErrorProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectivesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectivesResponseProto; @@ -109,23 +107,23 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesElementProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto; import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheEntryProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntryErrorProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto; @@ -146,6 +144,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.hdfs.server.namenode.CachePool; import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; import org.apache.hadoop.hdfs.server.namenode.SafeModeException; import org.apache.hadoop.io.EnumSetWritable; @@ -1027,7 +1026,7 @@ private static IOException addPathCacheDirectivesError(long code, return new InvalidPathNameError(directive); } else if (code == AddPathCacheDirectiveErrorProto. INVALID_POOL_NAME_ERROR_VALUE) { - return new InvalidPoolNameError(directive); + return new InvalidPoolError(directive); } else { return new UnexpectedAddPathCacheDirectiveException(directive); } @@ -1042,7 +1041,7 @@ public List> addPathCacheDirectives( for (PathCacheDirective directive : directives) { builder.addElements(PathCacheDirectiveProto.newBuilder(). setPath(directive.getPath()). - setPool(directive.getPool()). + setPool(PBHelper.convert(new CachePool(directive.getPoolId()))). 
build()); } AddPathCacheDirectivesResponseProto result = @@ -1121,42 +1120,40 @@ private static class BatchedPathCacheEntries @Override public PathCacheEntry get(int i) { - ListPathCacheEntriesElementProto elementProto = - response.getElements(i); - return new PathCacheEntry(elementProto.getId(), - new PathCacheDirective(elementProto.getPath(), - elementProto.getPool())); + PathCacheEntryProto entryProto = response.getEntries(i); + return PBHelper.convert(entryProto); } @Override public int size() { - return response.getElementsCount(); + return response.getEntriesCount(); } } private class PathCacheEntriesIterator extends BatchedRemoteIterator { - private final String pool; + private final long poolId; public PathCacheEntriesIterator(long prevKey, int maxRepliesPerRequest, - String pool) { + long poolId) { super(prevKey, maxRepliesPerRequest); - this.pool = pool; + this.poolId = poolId; } @Override public BatchedEntries makeRequest( - Long nextKey, int maxRepliesPerRequest) throws IOException { + Long prevEntryId, int maxRepliesPerRequest) throws IOException { ListPathCacheEntriesResponseProto response; try { ListPathCacheEntriesRequestProto req = ListPathCacheEntriesRequestProto.newBuilder(). - setPrevId(nextKey). - setPool(pool). + setPrevEntry( + PBHelper.convert(new PathCacheEntry(prevEntryId, null))). + setPool(PBHelper.convert(new CachePool(poolId))). setMaxReplies(maxRepliesPerRequest). build(); response = rpcProxy.listPathCacheEntries(null, req); - if (response.getElementsCount() == 0) { + if (response.getEntriesCount() == 0) { response = null; } } catch (ServiceException e) { @@ -1166,58 +1163,37 @@ public BatchedEntries makeRequest( } @Override - public Long elementToNextKey(PathCacheEntry element) { + public Long elementToPrevKey(PathCacheEntry element) { return element.getEntryId(); } } @Override public RemoteIterator listPathCacheEntries(long prevId, - String pool, int repliesPerRequest) throws IOException { - return new PathCacheEntriesIterator(prevId, repliesPerRequest, pool); + long poolId, int repliesPerRequest) throws IOException { + return new PathCacheEntriesIterator(prevId, repliesPerRequest, poolId); } @Override - public void addCachePool(CachePoolInfo info) throws IOException { - AddCachePoolRequestProto.Builder builder = + public CachePool addCachePool(CachePoolInfo info) throws IOException { + AddCachePoolRequestProto.Builder builder = AddCachePoolRequestProto.newBuilder(); - builder.setPoolName(info.getPoolName()); - if (info.getOwnerName() != null) { - builder.setOwnerName(info.getOwnerName()); - } - if (info.getGroupName() != null) { - builder.setGroupName(info.getGroupName()); - } - if (info.getMode() != null) { - builder.setMode(info.getMode()); - } - if (info.getWeight() != null) { - builder.setWeight(info.getWeight()); - } + builder.setInfo(PBHelper.convert(info)); try { - rpcProxy.addCachePool(null, builder.build()); + return PBHelper.convert( + rpcProxy.addCachePool(null, builder.build()).getPool()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } @Override - public void modifyCachePool(CachePoolInfo req) throws IOException { - ModifyCachePoolRequestProto.Builder builder = - ModifyCachePoolRequestProto.newBuilder(); - builder.setPoolName(req.getPoolName()); - if (req.getOwnerName() != null) { - builder.setOwnerName(req.getOwnerName()); - } - if (req.getGroupName() != null) { - builder.setGroupName(req.getGroupName()); - } - if (req.getMode() != null) { - builder.setMode(req.getMode()); - } - if (req.getWeight() != null) { - 
builder.setWeight(req.getWeight()); - } + public void modifyCachePool(long poolId, CachePoolInfo info) + throws IOException { + ModifyCachePoolRequestProto.Builder builder = + ModifyCachePoolRequestProto.newBuilder() + .setPool(PBHelper.convert(new CachePool(poolId))) + .setInfo(PBHelper.convert(info)); try { rpcProxy.modifyCachePool(null, builder.build()); } catch (ServiceException e) { @@ -1226,32 +1202,30 @@ public void modifyCachePool(CachePoolInfo req) throws IOException { } @Override - public void removeCachePool(String cachePoolName) throws IOException { + public void removeCachePool(long poolId) throws IOException { try { - rpcProxy.removeCachePool(null, + rpcProxy.removeCachePool(null, RemoveCachePoolRequestProto.newBuilder(). - setPoolName(cachePoolName).build()); + setPool(PBHelper.convert(new CachePool(poolId))). + build()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } private static class BatchedPathDirectiveEntries - implements BatchedEntries { + implements BatchedEntries { + private final ListCachePoolsResponseProto proto; - + public BatchedPathDirectiveEntries(ListCachePoolsResponseProto proto) { this.proto = proto; } - + @Override - public CachePoolInfo get(int i) { + public CachePool get(int i) { ListCachePoolsResponseElementProto elem = proto.getElements(i); - return new CachePoolInfo(elem.getPoolName()). - setOwnerName(elem.getOwnerName()). - setGroupName(elem.getGroupName()). - setMode(elem.getMode()). - setWeight(elem.getWeight()); + return PBHelper.convert(elem.getPool()); } @Override @@ -1259,37 +1233,38 @@ public int size() { return proto.getElementsCount(); } } - - private class CachePoolIterator - extends BatchedRemoteIterator { - public CachePoolIterator(String prevKey, int maxRepliesPerRequest) { + private class CachePoolIterator + extends BatchedRemoteIterator { + + public CachePoolIterator(Long prevKey, int maxRepliesPerRequest) { super(prevKey, maxRepliesPerRequest); } @Override - public BatchedEntries makeRequest(String prevKey, + public BatchedEntries makeRequest(Long prevKey, int maxRepliesPerRequest) throws IOException { try { return new BatchedPathDirectiveEntries( - rpcProxy.listCachePools(null, + rpcProxy.listCachePools(null, ListCachePoolsRequestProto.newBuilder(). - setPrevPoolName(prevKey). - setMaxReplies(maxRepliesPerRequest).build())); + setPrevPool(PBHelper.convert(new CachePool(prevKey))). + setMaxReplies(maxRepliesPerRequest). 
+ build())); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } @Override - public String elementToNextKey(CachePoolInfo element) { - return element.getPoolName(); + public Long elementToPrevKey(CachePool element) { + return element.getId(); } } @Override - public RemoteIterator listCachePools(String prevKey, + public RemoteIterator listCachePools(long prevPoolId, int maxRepliesPerRequest) throws IOException { - return new CachePoolIterator(prevKey, maxRepliesPerRequest); + return new CachePoolIterator(prevPoolId, maxRepliesPerRequest); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 4051d01e031..862527a0130 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -32,10 +32,13 @@ import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.PathCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathCacheEntry; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; import org.apache.hadoop.hdfs.protocol.DirectoryListing; @@ -50,9 +53,15 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheEntryProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto; @@ -114,6 +123,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.StorageInfo; +import org.apache.hadoop.hdfs.server.namenode.CachePool; import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature; import org.apache.hadoop.hdfs.server.namenode.INodeId; import 
org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand; @@ -1493,6 +1503,74 @@ public static HdfsProtos.ChecksumTypeProto convert(DataChecksum.Type type) { return HdfsProtos.ChecksumTypeProto.valueOf(type.id); } + public static PathCacheDirective convert( + PathCacheDirectiveProto directiveProto) { + CachePool pool = convert(directiveProto.getPool()); + return new PathCacheDirective(directiveProto.getPath(), pool.getId()); + } + + public static PathCacheDirectiveProto convert(PathCacheDirective directive) { + PathCacheDirectiveProto.Builder builder = + PathCacheDirectiveProto.newBuilder() + .setPath(directive.getPath()) + .setPool(PBHelper.convert(new CachePool(directive.getPoolId()))); + return builder.build(); + } + + public static PathCacheEntry convert(PathCacheEntryProto entryProto) { + long entryId = entryProto.getId(); + PathCacheDirective directive = convert(entryProto.getDirective()); + return new PathCacheEntry(entryId, directive); + } + + public static PathCacheEntryProto convert(PathCacheEntry entry) { + PathCacheEntryProto.Builder builder = PathCacheEntryProto.newBuilder() + .setId(entry.getEntryId()) + .setDirective(PBHelper.convert(entry.getDirective())); + return builder.build(); + } + + public static CachePoolInfo convert(CachePoolInfoProto infoProto) { + CachePoolInfo.Builder builder = + CachePoolInfo.newBuilder().setPoolName(infoProto.getPoolName()); + if (infoProto.hasOwnerName()) { + builder.setOwnerName(infoProto.getOwnerName()); + } + if (infoProto.hasGroupName()) { + builder.setGroupName(infoProto.getGroupName()); + } + if (infoProto.hasMode()) { + builder.setMode(new FsPermission((short) infoProto.getMode())); + } + if (infoProto.hasWeight()) { + builder.setWeight(infoProto.getWeight()); + } + return builder.build(); + } + + public static CachePoolInfoProto convert(CachePoolInfo info) { + CachePoolInfoProto.Builder builder = CachePoolInfoProto.newBuilder() + .setPoolName(info.getPoolName()) + .setOwnerName(info.getOwnerName()) + .setGroupName(info.getGroupName()) + .setMode(info.getMode().toShort()) + .setWeight(info.getWeight()); + return builder.build(); + } + + public static CachePool convert(CachePoolProto poolProto) { + CachePoolInfo info = convert(poolProto.getInfo()); + CachePool pool = new CachePool(poolProto.getId(), info); + return pool; + } + + public static CachePoolProto convert(CachePool pool) { + CachePoolProto.Builder builder = CachePoolProto.newBuilder() + .setId(pool.getId()) + .setInfo(convert(pool.getInfo())); + return builder.build(); + } + public static InputStream vintPrefixed(final InputStream input) throws IOException { final int firstByte = input.read(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index 06475802c61..83834d967e9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -19,25 +19,26 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.SortedMap; import java.util.TreeMap; -import java.util.Map.Entry; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.PoolWritePermissionDeniedError; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.UnexpectedAddPathCacheDirectiveException; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.PathCacheDirective; import org.apache.hadoop.hdfs.protocol.PathCacheEntry; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.UnexpectedAddPathCacheDirectiveException; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.PoolWritePermissionDeniedError; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException; import org.apache.hadoop.util.Fallible; /** @@ -64,14 +65,25 @@ final class CacheManager { /** * Cache pools, sorted by name. */ - private final TreeMap cachePools = + private final TreeMap cachePoolsByName = new TreeMap(); + /** + * Cache pools, sorted by ID + */ + private final TreeMap cachePoolsById = + new TreeMap(); + /** * The entry ID to use for a new entry. */ private long nextEntryId; + /** + * The pool ID to use for a new pool. 
+ */ + private long nextPoolId; + CacheManager(FSDirectory dir, Configuration conf) { // TODO: support loading and storing of the CacheManager state clear(); @@ -80,26 +92,35 @@ final class CacheManager { synchronized void clear() { entriesById.clear(); entriesByDirective.clear(); + cachePoolsByName.clear(); + cachePoolsById.clear(); nextEntryId = 1; + nextPoolId = 1; } synchronized long getNextEntryId() throws IOException { if (nextEntryId == Long.MAX_VALUE) { - throw new IOException("no more available IDs"); + throw new IOException("no more available entry IDs"); } return nextEntryId++; } + synchronized long getNextPoolId() throws IOException { + if (nextPoolId == Long.MAX_VALUE) { + throw new IOException("no more available pool IDs"); + } + return nextPoolId++; + } + private synchronized Fallible addDirective( - PathCacheDirective directive, FSPermissionChecker pc) { - CachePool pool = cachePools.get(directive.getPool()); + FSPermissionChecker pc, PathCacheDirective directive) { + CachePool pool = cachePoolsById.get(directive.getPoolId()); if (pool == null) { LOG.info("addDirective " + directive + ": pool not found."); return new Fallible( - new InvalidPoolNameError(directive)); + new InvalidPoolError(directive)); } - if (!pc.checkWritePermission(pool.getOwnerName(), - pool.getGroupName(), pool.getMode())) { + if (!pc.checkPermission(pool, FsAction.WRITE)) { LOG.info("addDirective " + directive + ": write permission denied."); return new Fallible( new PoolWritePermissionDeniedError(directive)); @@ -134,17 +155,17 @@ private synchronized Fallible addDirective( } public synchronized List> addDirectives( - List directives, FSPermissionChecker pc) { + FSPermissionChecker pc, List directives) { ArrayList> results = new ArrayList>(directives.size()); for (PathCacheDirective directive: directives) { - results.add(addDirective(directive, pc)); + results.add(addDirective(pc, directive)); } return results; } - private synchronized Fallible removeEntry(long entryId, - FSPermissionChecker pc) { + private synchronized Fallible removeEntry(FSPermissionChecker pc, + long entryId) { // Check for invalid IDs. if (entryId <= 0) { LOG.info("removeEntry " + entryId + ": invalid non-positive entry ID."); @@ -156,23 +177,20 @@ private synchronized Fallible removeEntry(long entryId, LOG.info("removeEntry " + entryId + ": entry not found."); return new Fallible(new NoSuchIdException(entryId)); } - CachePool pool = cachePools.get(existing.getDirective().getPool()); + CachePool pool = cachePoolsById.get(existing.getDirective().getPoolId()); if (pool == null) { LOG.info("removeEntry " + entryId + ": pool not found for directive " + existing.getDirective()); return new Fallible( new UnexpectedRemovePathCacheEntryException(entryId)); } - if (!pc.isSuperUser()) { - if (!pc.checkWritePermission(pool.getOwnerName(), - pool.getGroupName(), pool.getMode())) { - LOG.info("removeEntry " + entryId + ": write permission denied to " + - "pool " + pool + " for entry " + existing); - return new Fallible( - new RemovePermissionDeniedException(entryId)); - } + if (!pc.checkPermission(pool, FsAction.WRITE)) { + LOG.info("removeEntry " + entryId + ": write permission denied to " + + "pool " + pool + " for entry " + existing); + return new Fallible( + new RemovePermissionDeniedException(entryId)); } - + // Remove the corresponding entry in entriesByDirective. 
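Both the client-facing addPathCacheDirectives/removePathCacheEntries calls and the CacheManager methods behind them report per-item outcomes as a list of Fallible values rather than failing the whole batch. Below is a hedged sketch of how a caller unpacks those results; the namenode proxy, the paths, and the pool id are assumptions of the example, and the cause-inspection pattern follows the TestPathCacheRequests checks earlier in this series.

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.PathCacheDirective;
import org.apache.hadoop.hdfs.protocol.PathCacheEntry;
import org.apache.hadoop.util.Fallible;

class AddDirectivesExample {
  static void addAndReport(ClientProtocol namenode, long poolId)
      throws IOException {
    List<Fallible<PathCacheEntry>> results =
        namenode.addPathCacheDirectives(Arrays.asList(
            new PathCacheDirective("/alpha", poolId),
            new PathCacheDirective("/beta", poolId)));
    for (Fallible<PathCacheEntry> result : results) {
      try {
        // get() returns the new entry, or rethrows that item's failure.
        System.out.println("cached as entry " + result.get().getEntryId());
      } catch (IOException e) {
        // The specific error (invalid pool, permission denied, ...) is the cause.
        System.err.println("directive failed: " + e.getCause());
      }
    }
  }
}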
if (entriesByDirective.remove(existing.getDirective()) == null) { LOG.warn("removeEntry " + entryId + ": failed to find existing entry " + @@ -184,36 +202,43 @@ private synchronized Fallible removeEntry(long entryId, return new Fallible(entryId); } - public synchronized List> removeEntries(List entryIds, - FSPermissionChecker pc) { + public synchronized List> removeEntries(FSPermissionChecker pc, + List entryIds) { ArrayList> results = new ArrayList>(entryIds.size()); for (Long entryId : entryIds) { - results.add(removeEntry(entryId, pc)); + results.add(removeEntry(pc, entryId)); } return results; } - public synchronized List listPathCacheEntries(long prevId, - String pool, int maxReplies) { + public synchronized List listPathCacheEntries( + FSPermissionChecker pc, long prevId, Long poolId, int maxReplies) { final int MAX_PRE_ALLOCATED_ENTRIES = 16; - ArrayList replies = - new ArrayList(Math.min(MAX_PRE_ALLOCATED_ENTRIES, maxReplies)); + ArrayList replies = new ArrayList( + Math.min(MAX_PRE_ALLOCATED_ENTRIES, maxReplies)); int numReplies = 0; SortedMap tailMap = entriesById.tailMap(prevId + 1); - for (Entry cur : tailMap.entrySet()) { + for (PathCacheEntry entry : tailMap.values()) { if (numReplies >= maxReplies) { return replies; } - if (pool.isEmpty() || cur.getValue().getDirective(). - getPool().equals(pool)) { - replies.add(cur.getValue()); - numReplies++; + long entryPoolId = entry.getDirective().getPoolId(); + if (poolId == null || poolId <= 0 || entryPoolId == poolId) { + if (pc.checkPermission( + cachePoolsById.get(entryPoolId), FsAction.EXECUTE)) { + replies.add(entry); + numReplies++; + } } } return replies; } + synchronized CachePool getCachePool(long id) { + return cachePoolsById.get(id); + } + /** * Create a cache pool. * @@ -221,22 +246,24 @@ public synchronized List listPathCacheEntries(long prevId, * * @param info * The info for the cache pool to create. + * @return created CachePool */ - public synchronized void addCachePool(CachePoolInfo info) + public synchronized CachePool addCachePool(CachePoolInfo info) throws IOException { String poolName = info.getPoolName(); - if (poolName.isEmpty()) { + if (poolName == null || poolName.isEmpty()) { throw new IOException("invalid empty cache pool name"); } - CachePool pool = cachePools.get(poolName); - if (pool != null) { + if (cachePoolsByName.containsKey(poolName)) { throw new IOException("cache pool " + poolName + " already exists."); } - CachePool cachePool = new CachePool(poolName, + CachePool cachePool = new CachePool(getNextPoolId(), poolName, info.getOwnerName(), info.getGroupName(), info.getMode(), info.getWeight()); - cachePools.put(poolName, cachePool); + cachePoolsById.put(cachePool.getId(), cachePool); + cachePoolsByName.put(poolName, cachePool); LOG.info("created new cache pool " + cachePool); + return cachePool; } /** @@ -247,46 +274,62 @@ public synchronized void addCachePool(CachePoolInfo info) * @param info * The info for the cache pool to modify. 
*/ - public synchronized void modifyCachePool(CachePoolInfo info) + public synchronized void modifyCachePool(long poolId, CachePoolInfo info) throws IOException { - String poolName = info.getPoolName(); - if (poolName.isEmpty()) { - throw new IOException("invalid empty cache pool name"); + if (poolId <= 0) { + throw new IOException("invalid pool id " + poolId); } - CachePool pool = cachePools.get(poolName); - if (pool == null) { - throw new IOException("cache pool " + poolName + " does not exist."); + if (!cachePoolsById.containsKey(poolId)) { + throw new IOException("cache pool id " + poolId + " does not exist."); } + CachePool pool = cachePoolsById.get(poolId); + // Remove the old CachePoolInfo + removeCachePool(poolId); + // Build up the new CachePoolInfo + CachePoolInfo.Builder newInfo = CachePoolInfo.newBuilder(pool.getInfo()); StringBuilder bld = new StringBuilder(); String prefix = ""; + if (info.getPoolName() != null) { + newInfo.setPoolName(info.getPoolName()); + bld.append(prefix). + append("set name to ").append(info.getOwnerName()); + prefix = "; "; + } if (info.getOwnerName() != null) { - pool.setOwnerName(info.getOwnerName()); + newInfo.setOwnerName(info.getOwnerName()); bld.append(prefix). append("set owner to ").append(info.getOwnerName()); prefix = "; "; } if (info.getGroupName() != null) { - pool.setGroupName(info.getGroupName()); + newInfo.setGroupName(info.getGroupName()); bld.append(prefix). append("set group to ").append(info.getGroupName()); prefix = "; "; } if (info.getMode() != null) { - pool.setMode(info.getMode()); + newInfo.setMode(info.getMode()); bld.append(prefix). - append(String.format("set mode to 0%3o", info.getMode())); + append(String.format("set mode to ", info.getMode())); prefix = "; "; } if (info.getWeight() != null) { - pool.setWeight(info.getWeight()); + newInfo.setWeight(info.getWeight()); bld.append(prefix). append("set weight to ").append(info.getWeight()); prefix = "; "; } if (prefix.isEmpty()) { bld.append("no changes."); + } else { + pool.setInfo(newInfo.build()); } - LOG.info("modified " + poolName + "; " + bld.toString()); + // Put the newly modified info back in + cachePoolsById.put(poolId, pool); + cachePoolsByName.put(info.getPoolName(), pool); + LOG.info("modified pool id " + pool.getId() + + " (" + pool.getInfo().getPoolName() + "); " + + bld.toString()); } /** @@ -294,27 +337,38 @@ public synchronized void modifyCachePool(CachePoolInfo info) * * Only the superuser should be able to call this function. * - * @param poolName - * The name for the cache pool to remove. + * @param poolId + * The id of the cache pool to remove. 
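(Editorial aside, not part of the patch.) The modifyCachePool hunk above copies the existing pool's info into a builder and overwrites only the fields that arrive non-null, so a caller can change one attribute without restating the rest. A minimal stand-alone sketch of that merge pattern; the PoolSettings name and its fields are illustrative, not classes from this patch:

    // Illustrative only: apply just the fields the caller supplied (non-null),
    // keeping the current value for anything left as null.
    final class PoolSettings {
        final String owner;
        final String group;
        final Integer weight;

        PoolSettings(String owner, String group, Integer weight) {
            this.owner = owner;
            this.group = group;
            this.weight = weight;
        }

        PoolSettings merge(String newOwner, String newGroup, Integer newWeight) {
            return new PoolSettings(
                newOwner != null ? newOwner : owner,
                newGroup != null ? newGroup : group,
                newWeight != null ? newWeight : weight);
        }
    }
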
*/ - public synchronized void removeCachePool(String poolName) - throws IOException { - CachePool pool = cachePools.remove(poolName); - if (pool == null) { - throw new IOException("can't remove nonexistent cache pool " + poolName); + public synchronized void removeCachePool(long poolId) throws IOException { + if (!cachePoolsById.containsKey(poolId)) { + throw new IOException("can't remove nonexistent cache pool id " + poolId); } + // Remove all the entries associated with the pool + Iterator> it = + entriesById.entrySet().iterator(); + while (it.hasNext()) { + Map.Entry entry = it.next(); + if (entry.getValue().getDirective().getPoolId() == poolId) { + it.remove(); + entriesByDirective.remove(entry.getValue().getDirective()); + } + } + // Remove the pool + CachePool pool = cachePoolsById.remove(poolId); + cachePoolsByName.remove(pool.getInfo().getPoolName()); } - public synchronized List - listCachePools(FSPermissionChecker pc, String prevKey, - int maxRepliesPerRequest) { + public synchronized List listCachePools(Long prevKey, + int maxRepliesPerRequest) { final int MAX_PREALLOCATED_REPLIES = 16; - ArrayList results = - new ArrayList(Math.min(MAX_PREALLOCATED_REPLIES, + ArrayList results = + new ArrayList(Math.min(MAX_PREALLOCATED_REPLIES, maxRepliesPerRequest)); - SortedMap tailMap = cachePools.tailMap(prevKey, false); - for (Entry cur : tailMap.entrySet()) { - results.add(cur.getValue().getInfo(pc)); + SortedMap tailMap = + cachePoolsById.tailMap(prevKey, false); + for (CachePool pool : tailMap.values()) { + results.add(pool); } return results; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java index 8a8f30b8121..5de424ac7d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java @@ -19,123 +19,119 @@ import java.io.IOException; -import javax.annotation.Nonnull; - +import org.apache.commons.lang.builder.EqualsBuilder; +import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo.Builder; import org.apache.hadoop.security.UserGroupInformation; /** - * The NameNode uses CachePools to manage cache resources on the DataNodes. + * A CachePool describes a set of cache resources being managed by the NameNode. + * User caching requests are billed to the cache pool specified in the request. + * + * CachePools are uniquely identified by a numeric id as well as the + * {@link CachePoolInfo} pool name. Mutable metadata is contained in + * CachePoolInfo, including pool name, owner, group, and permissions. + * See this class for more details. 
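(Editorial aside, not part of the patch.) The CacheManager hunks above keep two indexes over the same pools, cachePoolsById and cachePoolsByName, and every add or remove has to update both. A self-contained sketch of that bookkeeping, using hypothetical names (PoolIndex, Pool) rather than the patch's classes:

    import java.util.TreeMap;

    // Illustrative bookkeeping: both maps must stay in sync on add and remove.
    final class PoolIndex {
        static final class Pool {
            final long id;
            final String name;
            Pool(long id, String name) { this.id = id; this.name = name; }
        }

        private final TreeMap<Long, Pool> byId = new TreeMap<Long, Pool>();
        private final TreeMap<String, Pool> byName = new TreeMap<String, Pool>();
        private long nextId = 1;

        synchronized Pool add(String name) {
            if (byName.containsKey(name)) {
                throw new IllegalArgumentException("pool " + name + " already exists");
            }
            Pool p = new Pool(nextId++, name);
            byId.put(p.id, p);
            byName.put(p.name, p);
            return p;
        }

        synchronized void remove(long id) {
            Pool p = byId.remove(id);
            if (p == null) {
                throw new IllegalArgumentException("no pool with id " + id);
            }
            byName.remove(p.name);
        }
    }
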
*/ public final class CachePool { public static final Log LOG = LogFactory.getLog(CachePool.class); - @Nonnull - private final String poolName; + private final long id; - @Nonnull - private String ownerName; + private CachePoolInfo info; - @Nonnull - private String groupName; - - private int mode; - - private int weight; - - public static String getCurrentUserPrimaryGroupName() throws IOException { - UserGroupInformation ugi= NameNode.getRemoteUser(); - String[] groups = ugi.getGroupNames(); - if (groups.length == 0) { - throw new IOException("failed to get group names from UGI " + ugi); + public CachePool(long id) { + this.id = id; + this.info = null; + } + + CachePool(long id, String poolName, String ownerName, String groupName, + FsPermission mode, Integer weight) throws IOException { + this.id = id; + // Set CachePoolInfo default fields if null + if (poolName == null || poolName.isEmpty()) { + throw new IOException("invalid empty cache pool name"); } - return groups[0]; - } - - public CachePool(String poolName, String ownerName, String groupName, - Integer mode, Integer weight) throws IOException { - this.poolName = poolName; - this.ownerName = ownerName != null ? ownerName : - NameNode.getRemoteUser().getShortUserName(); - this.groupName = groupName != null ? groupName : - getCurrentUserPrimaryGroupName(); - this.mode = mode != null ? mode : 0644; - this.weight = weight != null ? weight : 100; + UserGroupInformation ugi = null; + if (ownerName == null) { + ugi = NameNode.getRemoteUser(); + ownerName = ugi.getShortUserName(); + } + if (groupName == null) { + if (ugi == null) { + ugi = NameNode.getRemoteUser(); + } + String[] groups = ugi.getGroupNames(); + if (groups.length == 0) { + throw new IOException("failed to get group names from UGI " + ugi); + } + groupName = groups[0]; + } + if (mode == null) { + mode = FsPermission.getDirDefault(); + } + if (weight == null) { + weight = 100; + } + CachePoolInfo.Builder builder = CachePoolInfo.newBuilder(); + builder.setPoolName(poolName).setOwnerName(ownerName) + .setGroupName(groupName).setMode(mode).setWeight(weight); + this.info = builder.build(); } - public String getName() { - return poolName; + public CachePool(long id, CachePoolInfo info) { + this.id = id; + this.info = info; } - public String getOwnerName() { - return ownerName; + /** + * @return id of the pool + */ + public long getId() { + return id; } - public CachePool setOwnerName(String ownerName) { - this.ownerName = ownerName; - return this; - } - - public String getGroupName() { - return groupName; - } - - public CachePool setGroupName(String groupName) { - this.groupName = groupName; - return this; - } - - public int getMode() { - return mode; - } - - public CachePool setMode(int mode) { - this.mode = mode; - return this; - } - - public int getWeight() { - return weight; - } - - public CachePool setWeight(int weight) { - this.weight = weight; - return this; - } - /** * Get information about this cache pool. * - * @param fullInfo - * If true, only the name will be returned (i.e., what you - * would get if you didn't have read permission for this pool.) * @return * Cache pool information. */ - public CachePoolInfo getInfo(boolean fullInfo) { - CachePoolInfo info = new CachePoolInfo(poolName); - if (!fullInfo) { - return info; - } - return info.setOwnerName(ownerName). - setGroupName(groupName). - setMode(mode). 
- setWeight(weight); + public CachePoolInfo getInfo() { + return info; } - public CachePoolInfo getInfo(FSPermissionChecker pc) { - return getInfo(pc.checkReadPermission(ownerName, groupName, mode)); + void setInfo(CachePoolInfo info) { + this.info = info; } public String toString() { return new StringBuilder(). - append("{ ").append("poolName:").append(poolName). - append(", ownerName:").append(ownerName). - append(", groupName:").append(groupName). - append(", mode:").append(String.format("%3o", mode)). - append(", weight:").append(weight). + append("{ ").append("id:").append(id). + append(", info:").append(info.toString()). append(" }").toString(); } + + @Override + public int hashCode() { + return new HashCodeBuilder().append(id).append(info).hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { return false; } + if (obj == this) { return true; } + if (obj.getClass() != getClass()) { + return false; + } + CachePool rhs = (CachePool)obj; + return new EqualsBuilder() + .append(id, rhs.id) + .append(info, rhs.info) + .isEquals(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index ca287ab7dcb..040a3b422ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -6701,7 +6701,6 @@ void deleteSnapshot(String snapshotRoot, String snapshotName) return; // Return previous response } boolean success = false; - checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -6749,6 +6748,7 @@ void removeSnapshottableDirs(List toRemove) { } } + @SuppressWarnings("unchecked") List> addPathCacheDirectives( List directives) throws IOException { CacheEntryWithPayload retryCacheEntry = @@ -6759,7 +6759,6 @@ List> addPathCacheDirectives( final FSPermissionChecker pc = getPermissionChecker(); boolean success = false; List> results = null; - checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -6767,7 +6766,7 @@ List> addPathCacheDirectives( throw new SafeModeException( "Cannot add path cache directive", safeMode); } - results = cacheManager.addDirectives(directives, pc); + results = cacheManager.addDirectives(pc, directives); //getEditLog().logAddPathCacheDirectives(results); FIXME: HDFS-5119 success = true; } finally { @@ -6775,7 +6774,7 @@ List> addPathCacheDirectives( if (success) { getEditLog().logSync(); } - if (isAuditEnabled() && isExternalInvocation()) { + if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(success, "addPathCacheDirectives", null, null, null); } RetryCache.setState(retryCacheEntry, success, results); @@ -6783,58 +6782,50 @@ List> addPathCacheDirectives( return results; } - List> removePathCacheEntries(List ids) throws IOException { - CacheEntryWithPayload retryCacheEntry = - RetryCache.waitForCompletion(retryCache, null); - if (retryCacheEntry != null && retryCacheEntry.isSuccess()) { - return (List>) retryCacheEntry.getPayload(); - } + @SuppressWarnings("unchecked") + List> removePathCacheEntries(List ids) + throws IOException { final FSPermissionChecker pc = getPermissionChecker(); boolean success = false; List> results = null; - checkOperation(OperationCategory.WRITE); writeLock(); try { 
checkOperation(OperationCategory.WRITE); if (isInSafeMode()) { throw new SafeModeException( - "Cannot remove path cache directives", safeMode); + "Cannot add path cache directive", safeMode); } - results = cacheManager.removeEntries(ids, pc); + results = cacheManager.removeEntries(pc, ids); //getEditLog().logRemovePathCacheEntries(results); FIXME: HDFS-5119 success = true; } finally { writeUnlock(); - if (isAuditEnabled() && isExternalInvocation()) { + if (success) { + getEditLog().logSync(); + } + if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(success, "removePathCacheEntries", null, null, null); } - RetryCache.setState(retryCacheEntry, success, results); } - getEditLog().logSync(); return results; } - List listPathCacheEntries(long startId, String pool, - int maxReplies) throws IOException { - checkOperation(OperationCategory.READ); - readLock(); - try { - checkOperation(OperationCategory.READ); - return cacheManager.listPathCacheEntries(startId, pool, maxReplies); - } finally { - readUnlock(); - } + List listPathCacheEntries(long startId, + Long poolId, int maxReplies) throws IOException { + LOG.info("listPathCacheEntries with " + startId + " " + poolId); + final FSPermissionChecker pc = getPermissionChecker(); + return cacheManager.listPathCacheEntries(pc, startId, poolId, maxReplies); } - public void addCachePool(CachePoolInfo req) throws IOException { + public CachePool addCachePool(CachePoolInfo req) throws IOException { final FSPermissionChecker pc = getPermissionChecker(); - CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); + CacheEntryWithPayload cacheEntry = + RetryCache.waitForCompletion(retryCache, null); if (cacheEntry != null && cacheEntry.isSuccess()) { - return; // Return previous response + return (CachePool)cacheEntry.getPayload(); // Return previous response } - checkOperation(OperationCategory.WRITE); writeLock(); - boolean success = false; + CachePool pool = null; try { checkOperation(OperationCategory.WRITE); if (!pc.isSuperUser()) { @@ -6845,29 +6836,28 @@ public void addCachePool(CachePoolInfo req) throws IOException { throw new SafeModeException( "Cannot add cache pool " + req.getPoolName(), safeMode); } - cacheManager.addCachePool(req); + pool = cacheManager.addCachePool(req); + RetryCache.setState(cacheEntry, true); //getEditLog().logAddCachePool(req); // FIXME: HDFS-5119 - success = true; } finally { writeUnlock(); - if (isAuditEnabled() && isExternalInvocation()) { - logAuditEvent(success, "addCachePool", req.getPoolName(), null, null); - } - RetryCache.setState(cacheEntry, success); } - + getEditLog().logSync(); + if (auditLog.isInfoEnabled() && isExternalInvocation()) { + logAuditEvent(true, "addCachePool", req.getPoolName(), null, null); + } + return pool; } - public void modifyCachePool(CachePoolInfo req) throws IOException { + public void modifyCachePool(long poolId, CachePoolInfo info) + throws IOException { final FSPermissionChecker pc = getPermissionChecker(); CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); if (cacheEntry != null && cacheEntry.isSuccess()) { return; // Return previous response } - checkOperation(OperationCategory.WRITE); writeLock(); - boolean success = false; try { checkOperation(OperationCategory.WRITE); if (!pc.isSuperUser()) { @@ -6876,64 +6866,62 @@ public void modifyCachePool(CachePoolInfo req) throws IOException { } if (isInSafeMode()) { throw new SafeModeException( - "Cannot modify cache pool " + req.getPoolName(), safeMode); + "Cannot modify cache pool " + 
info.getPoolName(), safeMode); } - cacheManager.modifyCachePool(req); + cacheManager.modifyCachePool(poolId, info); + RetryCache.setState(cacheEntry, true); //getEditLog().logModifyCachePool(req); // FIXME: HDFS-5119 - success = true; } finally { writeUnlock(); - if (isAuditEnabled() && isExternalInvocation()) { - logAuditEvent(success, "modifyCachePool", req.getPoolName(), null, null); - } - RetryCache.setState(cacheEntry, success); } getEditLog().logSync(); + if (auditLog.isInfoEnabled() && isExternalInvocation()) { + logAuditEvent(true, "modifyCachePool", info.getPoolName(), null, null); + } } - public void removeCachePool(String cachePoolName) throws IOException { + public void removeCachePool(long poolId) throws IOException { final FSPermissionChecker pc = getPermissionChecker(); - CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); - if (cacheEntry != null && cacheEntry.isSuccess()) { - return; // Return previous response - } - checkOperation(OperationCategory.WRITE); writeLock(); - boolean success = false; + CachePool pool; try { checkOperation(OperationCategory.WRITE); if (!pc.isSuperUser()) { throw new AccessControlException("Non-super users cannot " + "remove cache pools."); } + pool = cacheManager.getCachePool(poolId); if (isInSafeMode()) { + String identifier; + if (pool == null) { + identifier = "with id " + Long.toString(poolId); + } else { + identifier = pool.getInfo().getPoolName(); + } throw new SafeModeException( - "Cannot remove cache pool " + cachePoolName, safeMode); + "Cannot remove cache pool " + identifier, safeMode); } - cacheManager.removeCachePool(cachePoolName); + cacheManager.removeCachePool(poolId); //getEditLog().logRemoveCachePool(req); // FIXME: HDFS-5119 - success = true; } finally { writeUnlock(); - if (isAuditEnabled() && isExternalInvocation()) { - logAuditEvent(success, "removeCachePool", cachePoolName, null, null); - } - RetryCache.setState(cacheEntry, success); } - + getEditLog().logSync(); + if (auditLog.isInfoEnabled() && isExternalInvocation()) { + logAuditEvent(true, "removeCachePool", pool.getInfo().getPoolName(), + null, null); + } } - public List listCachePools(String prevKey, + public List listCachePools(long prevKey, int maxRepliesPerRequest) throws IOException { - final FSPermissionChecker pc = getPermissionChecker(); - List results; - checkOperation(OperationCategory.READ); + List results; readLock(); try { checkOperation(OperationCategory.READ); - results = cacheManager.listCachePools(pc, prevKey, maxRepliesPerRequest); + results = cacheManager.listCachePools(prevKey, maxRepliesPerRequest); } finally { readUnlock(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index c516a73e57f..54f7463014e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -29,6 +28,7 @@ import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import 
org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; @@ -257,38 +257,29 @@ private void checkStickyBit(INode parent, INode inode, Snapshot snapshot } /** - * Check if this CachePool can be accessed. + * Whether a cache pool can be accessed by the current context * - * @param pc - * Permission checker object with user name and groups. - * @param write - * True if we care about write access; false otherwise. - * @return - * True only if the cache pool is accessible. + * @param pool CachePool being accessed + * @param access type of action being performed on the cache pool + * @return if the pool can be accessed */ - private boolean checkPermission(String userName, - String groupName, int mode, int mask) { - if ((mode & mask) != 0) { + public boolean checkPermission(CachePool pool, FsAction access) { + CachePoolInfo info = pool.getInfo(); + FsPermission mode = info.getMode(); + if (isSuperUser()) { return true; } - if (((mode & (mask << 6)) != 0) - && (getUser().equals(userName))) { + if (user.equals(info.getOwnerName()) + && mode.getUserAction().implies(access)) { return true; } - if (((mode & (mask << 6)) != 0) - && (containsGroup(groupName))) { + if (groups.contains(info.getGroupName()) + && mode.getGroupAction().implies(access)) { + return true; + } + if (mode.getOtherAction().implies(access)) { return true; } return false; } - - public boolean checkWritePermission(String userName, - String groupName, int mode) { - return checkPermission(userName, groupName, mode, 02); - } - - public boolean checkReadPermission(String userName, - String groupName, int mode) { - return checkPermission(userName, groupName, mode, 04); - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 9eb09bb43af..f5f85d124fe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -31,13 +31,11 @@ import java.util.Collection; import java.util.HashMap; import java.util.List; -import java.util.NoSuchElementException; import org.apache.commons.logging.Log; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BatchedRemoteIterator; -import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.CreateFlag; @@ -62,9 +60,9 @@ import org.apache.hadoop.hdfs.HDFSPolicyProvider; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.PathCacheDirective; import org.apache.hadoop.hdfs.protocol.PathCacheEntry; -import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -1225,72 +1223,73 @@ public List> removePathCacheEntries(List ids) private class ServerSidePathCacheEntriesIterator extends BatchedRemoteIterator { - private final 
String pool; + private final Long poolId; public ServerSidePathCacheEntriesIterator(Long firstKey, - int maxRepliesPerRequest, String pool) { + int maxRepliesPerRequest, Long poolId) { super(firstKey, maxRepliesPerRequest); - this.pool = pool; + this.poolId = poolId; } @Override public BatchedEntries makeRequest( - Long nextKey, int maxRepliesPerRequest) throws IOException { + Long prevKey, int maxRepliesPerRequest) throws IOException { return new BatchedListEntries( - namesystem.listPathCacheEntries(nextKey, pool, + namesystem.listPathCacheEntries(prevKey, poolId, maxRepliesPerRequest)); } @Override - public Long elementToNextKey(PathCacheEntry entry) { + public Long elementToPrevKey(PathCacheEntry entry) { return entry.getEntryId(); } } - + @Override - public RemoteIterator listPathCacheEntries(long prevId, String pool, - int maxReplies) throws IOException { - return new ServerSidePathCacheEntriesIterator(prevId, maxReplies, pool); + public RemoteIterator listPathCacheEntries(long prevId, + long poolId, int maxReplies) throws IOException { + return new ServerSidePathCacheEntriesIterator(prevId, maxReplies, poolId); } @Override - public void addCachePool(CachePoolInfo info) throws IOException { - namesystem.addCachePool(info); + public CachePool addCachePool(CachePoolInfo info) throws IOException { + return namesystem.addCachePool(info); } @Override - public void modifyCachePool(CachePoolInfo info) throws IOException { - namesystem.modifyCachePool(info); + public void modifyCachePool(long poolId, CachePoolInfo info) + throws IOException { + namesystem.modifyCachePool(poolId, info); } @Override - public void removeCachePool(String cachePoolName) throws IOException { - namesystem.removeCachePool(cachePoolName); + public void removeCachePool(long poolId) throws IOException { + namesystem.removeCachePool(poolId); } private class ServerSideCachePoolIterator - extends BatchedRemoteIterator { + extends BatchedRemoteIterator { - public ServerSideCachePoolIterator(String prevKey, int maxRepliesPerRequest) { - super(prevKey, maxRepliesPerRequest); + public ServerSideCachePoolIterator(long prevId, int maxRepliesPerRequest) { + super(prevId, maxRepliesPerRequest); } @Override - public BatchedEntries makeRequest(String prevKey, + public BatchedEntries makeRequest(Long prevId, int maxRepliesPerRequest) throws IOException { - return new BatchedListEntries( - namesystem.listCachePools(prevKey, maxRepliesPerRequest)); + return new BatchedListEntries( + namesystem.listCachePools(prevId, maxRepliesPerRequest)); } @Override - public String elementToNextKey(CachePoolInfo element) { - return element.getPoolName(); + public Long elementToPrevKey(CachePool element) { + return element.getId(); } } @Override - public RemoteIterator listCachePools(String prevKey, + public RemoteIterator listCachePools(long prevPoolId, int maxRepliesPerRequest) throws IOException { - return new ServerSideCachePoolIterator(prevKey, maxRepliesPerRequest); + return new ServerSideCachePoolIterator(prevPoolId, maxRepliesPerRequest); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index e799ebf413a..f196a7074ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -363,9 +363,27 @@ message IsFileClosedResponseProto { required bool result = 1; } +message CachePoolInfoProto { + optional string poolName 
= 1; + optional string ownerName = 2; + optional string groupName = 3; + optional int32 mode = 4; + optional int32 weight = 5; +} + +message CachePoolProto { + optional int64 id = 1; + optional CachePoolInfoProto info = 2; +} + message PathCacheDirectiveProto { required string path = 1; - required string pool = 2; + required CachePoolProto pool = 2; +} + +message PathCacheEntryProto { + required int64 id = 1; + optional PathCacheDirectiveProto directive = 2; } message AddPathCacheDirectivesRequestProto { @@ -399,53 +417,41 @@ enum RemovePathCacheEntryErrorProto { } message ListPathCacheEntriesRequestProto { - required int64 prevId = 1; - required string pool = 2; + required PathCacheEntryProto prevEntry = 1; + required CachePoolProto pool = 2; optional int32 maxReplies = 3; } -message ListPathCacheEntriesElementProto { - required int64 id = 1; - required string path = 2; - required string pool = 3; -} - message ListPathCacheEntriesResponseProto { - repeated ListPathCacheEntriesElementProto elements = 1; + repeated PathCacheEntryProto entries = 1; required bool hasMore = 2; } message AddCachePoolRequestProto { - required string poolName = 1; - optional string ownerName = 2; - optional string groupName = 3; - optional int32 mode = 4; - optional int32 weight = 5; + required CachePoolInfoProto info = 1; } -message AddCachePoolResponseProto { // void response +message AddCachePoolResponseProto { + required CachePoolProto pool = 1; } message ModifyCachePoolRequestProto { - required string poolName = 1; - optional string ownerName = 2; - optional string groupName = 3; - optional int32 mode = 4; - optional int32 weight = 5; + required CachePoolProto pool = 1; + required CachePoolInfoProto info = 2; } message ModifyCachePoolResponseProto { // void response } message RemoveCachePoolRequestProto { - required string poolName = 1; + required CachePoolProto pool = 1; } message RemoveCachePoolResponseProto { // void response } message ListCachePoolsRequestProto { - required string prevPoolName = 1; + required CachePoolProto prevPool = 1; required int32 maxReplies = 2; } @@ -455,11 +461,7 @@ message ListCachePoolsResponseProto { } message ListCachePoolsResponseElementProto { - required string poolName = 1; - required string ownerName = 2; - required string groupName = 3; - required int32 mode = 4; - required int32 weight = 5; + required CachePoolProto pool = 1; } message GetFileLinkInfoRequestProto { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java index 6d67670783c..e35a480f0e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java @@ -92,6 +92,9 @@ public void setUp() throws Exception { @After public void tearDown() throws Exception { + if (fs != null) { + fs.close(); + } if (cluster != null) { cluster.shutdown(); } @@ -159,13 +162,11 @@ private static long[] getBlockSizes(HdfsBlockLocation[] locs) } /** - * Blocks until cache usage changes from the current value, then verifies - * against the expected new value. + * Blocks until cache usage hits the expected new value. 
*/ - private long verifyExpectedCacheUsage(final long current, - final long expected) throws Exception { + private long verifyExpectedCacheUsage(final long expected) throws Exception { long cacheUsed = fsd.getCacheUsed(); - while (cacheUsed == current) { + while (cacheUsed != expected) { cacheUsed = fsd.getCacheUsed(); Thread.sleep(100); } @@ -202,13 +203,13 @@ public void testCacheAndUncacheBlock() throws Exception { // Cache each block in succession, checking each time for (int i=0; i iter = proto.listCachePools("", 1); - CachePoolInfo info = iter.next(); - assertEquals("pool1", info.getPoolName()); - assertEquals("def", info.getOwnerName()); - assertEquals("456", info.getGroupName()); - assertEquals(Integer.valueOf(0755), info.getMode()); - assertEquals(Integer.valueOf(150), info.getWeight()); + // Create a new pool + CachePoolInfo info = CachePoolInfo.newBuilder(). + setPoolName("pool1"). + setOwnerName("abc"). + setGroupName("123"). + setMode(new FsPermission((short)0755)). + setWeight(150). + build(); + CachePool pool = proto.addCachePool(info); + CachePoolInfo actualInfo = pool.getInfo(); + assertEquals("Expected info to match create time settings", + info, actualInfo); + // Modify the pool + info = CachePoolInfo.newBuilder(). + setPoolName("pool2"). + setOwnerName("def"). + setGroupName("456"). + setMode(new FsPermission((short)0644)). + setWeight(200). + build(); + proto.modifyCachePool(pool.getId(), info); + // Check via listing this time + RemoteIterator iter = proto.listCachePools(0, 1); + CachePool listedPool = iter.next(); + actualInfo = listedPool.getInfo(); + assertEquals("Expected info to match modified settings", info, actualInfo); try { - proto.removeCachePool("pool99"); + proto.removeCachePool(808); Assert.fail("expected to get an exception when " + "removing a non-existent pool."); } catch (IOException ioe) { } - proto.removeCachePool("pool1"); + proto.removeCachePool(pool.getId()); try { - proto.removeCachePool("pool1"); + proto.removeCachePool(pool.getId()); Assert.fail("expected to get an exception when " + "removing a non-existent pool."); } catch (IOException ioe) { @@ -121,13 +142,13 @@ private static void validateListAll( RemoteIterator iter, long id0, long id1, long id2) throws Exception { Assert.assertEquals(new PathCacheEntry(id0, - new PathCacheDirective("/alpha", "pool1")), + new PathCacheDirective("/alpha", 1)), iter.next()); Assert.assertEquals(new PathCacheEntry(id1, - new PathCacheDirective("/beta", "pool2")), + new PathCacheDirective("/beta", 2)), iter.next()); Assert.assertEquals(new PathCacheEntry(id2, - new PathCacheDirective("/gamma", "pool1")), + new PathCacheDirective("/gamma", 1)), iter.next()); Assert.assertFalse(iter.hasNext()); } @@ -140,23 +161,36 @@ public void testSetAndGet() throws Exception { try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); - NamenodeProtocols proto = cluster.getNameNodeRpc(); - proto.addCachePool(new CachePoolInfo("pool1")); - proto.addCachePool(new CachePoolInfo("pool2")); - proto.addCachePool(new CachePoolInfo("pool3")); - proto.addCachePool(new CachePoolInfo("pool4").setMode(0)); - List> addResults1 = - proto.addPathCacheDirectives(Arrays.asList( - new PathCacheDirective[] { - new PathCacheDirective("/alpha", "pool1"), - new PathCacheDirective("/beta", "pool2"), - new PathCacheDirective("", "pool3"), - new PathCacheDirective("/zeta", "nonexistent_pool"), - new PathCacheDirective("/zeta", "pool4") - })); + final CachePool pool1 = proto.addCachePool(new 
CachePoolInfo("pool1")); + final CachePool pool2 = proto.addCachePool(new CachePoolInfo("pool2")); + final CachePool pool3 = proto.addCachePool(new CachePoolInfo("pool3")); + final CachePool pool4 = proto.addCachePool(CachePoolInfo.newBuilder() + .setPoolName("pool4") + .setMode(new FsPermission((short)0)).build()); + UserGroupInformation testUgi = UserGroupInformation + .createUserForTesting("myuser", new String[]{"mygroup"}); + List> addResults1 = testUgi.doAs( + new PrivilegedExceptionAction>>() { + @Override + public List> run() throws IOException { + List> entries; + entries = proto.addPathCacheDirectives( + Arrays.asList(new PathCacheDirective[] { + new PathCacheDirective("/alpha", pool1.getId()), + new PathCacheDirective("/beta", pool2.getId()), + new PathCacheDirective("", pool3.getId()), + new PathCacheDirective("/zeta", 404), + new PathCacheDirective("/zeta", pool4.getId()) + })); + return entries; + } + }); + // Save the successful additions long ids1[] = new long[2]; - ids1[0] = addResults1.get(0).get().getEntryId(); - ids1[1] = addResults1.get(1).get().getEntryId(); + for (int i=0; i<2; i++) { + ids1[i] = addResults1.get(i).get().getEntryId(); + } + // Verify that the unsuccessful additions failed properly try { addResults1.get(2).get(); Assert.fail("expected an error when adding an empty path"); @@ -167,7 +201,7 @@ public void testSetAndGet() throws Exception { addResults1.get(3).get(); Assert.fail("expected an error when adding to a nonexistent pool."); } catch (IOException ioe) { - Assert.assertTrue(ioe.getCause() instanceof InvalidPoolNameError); + Assert.assertTrue(ioe.getCause() instanceof InvalidPoolError); } try { addResults1.get(4).get(); @@ -181,10 +215,10 @@ public void testSetAndGet() throws Exception { List> addResults2 = proto.addPathCacheDirectives(Arrays.asList( new PathCacheDirective[] { - new PathCacheDirective("/alpha", "pool1"), - new PathCacheDirective("/theta", ""), - new PathCacheDirective("bogus", "pool1"), - new PathCacheDirective("/gamma", "pool1") + new PathCacheDirective("/alpha", pool1.getId()), + new PathCacheDirective("/theta", 404), + new PathCacheDirective("bogus", pool1.getId()), + new PathCacheDirective("/gamma", pool1.getId()) })); long id = addResults2.get(0).get().getEntryId(); Assert.assertEquals("expected to get back the same ID as last time " + @@ -194,7 +228,7 @@ public void testSetAndGet() throws Exception { Assert.fail("expected an error when adding a path cache " + "directive with an empty pool name."); } catch (IOException ioe) { - Assert.assertTrue(ioe.getCause() instanceof InvalidPoolNameError); + Assert.assertTrue(ioe.getCause() instanceof InvalidPoolError); } try { addResults2.get(2).get(); @@ -206,14 +240,16 @@ public void testSetAndGet() throws Exception { long ids2[] = new long[1]; ids2[0] = addResults2.get(3).get().getEntryId(); + // Validate listing all entries RemoteIterator iter = - proto.listPathCacheEntries(0, "", 100); + proto.listPathCacheEntries(-1l, -1l, 100); validateListAll(iter, ids1[0], ids1[1], ids2[0]); - iter = proto.listPathCacheEntries(0, "", 1); + iter = proto.listPathCacheEntries(-1l, -1l, 1); validateListAll(iter, ids1[0], ids1[1], ids2[0]); - iter = proto.listPathCacheEntries(0, "pool3", 1); + // Validate listing certain pools + iter = proto.listPathCacheEntries(0, pool3.getId(), 1); Assert.assertFalse(iter.hasNext()); - iter = proto.listPathCacheEntries(0, "pool2", 4444); + iter = proto.listPathCacheEntries(0, pool2.getId(), 4444); Assert.assertEquals(addResults1.get(1).get(), iter.next()); 
Assert.assertFalse(iter.hasNext()); @@ -235,7 +271,7 @@ public void testSetAndGet() throws Exception { } catch (IOException ioe) { Assert.assertTrue(ioe.getCause() instanceof NoSuchIdException); } - iter = proto.listPathCacheEntries(0, "pool2", 4444); + iter = proto.listPathCacheEntries(0, pool2.getId(), 4444); Assert.assertFalse(iter.hasNext()); } finally { if (cluster != null) { cluster.shutdown(); } From f41f8b8842c3f26d19f7fa928070c7c07f760e4c Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Fri, 6 Sep 2013 18:52:50 +0000 Subject: [PATCH 12/51] HDFS-5163. Miscellaneous cache pool RPC fixes (Contributed by Colin Patrick McCabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1520665 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop/fs/BatchedRemoteIterator.java | 26 +- .../hadoop/fs/permission/FsPermission.java | 7 + .../hadoop/security/UserGroupInformation.java | 8 + .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 + .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 7 + .../AddPathCacheDirectiveException.java | 12 +- .../hadoop/hdfs/protocol/CachePoolInfo.java | 169 +++++------- .../hadoop/hdfs/protocol/ClientProtocol.java | 50 ++-- .../hdfs/protocol/PathCacheDirective.java | 25 +- ...amenodeProtocolServerSideTranslatorPB.java | 97 ++++--- .../ClientNamenodeProtocolTranslatorPB.java | 172 ++++++++----- .../hadoop/hdfs/protocolPB/PBHelper.java | 78 ------ .../hdfs/server/namenode/CacheManager.java | 240 +++++++++--------- .../hdfs/server/namenode/CachePool.java | 160 ++++++------ .../hdfs/server/namenode/FSNamesystem.java | 211 ++++++++------- .../server/namenode/FSPermissionChecker.java | 9 +- .../server/namenode/NameNodeRpcServer.java | 61 +++-- .../main/proto/ClientNamenodeProtocol.proto | 60 ++--- .../namenode/TestPathCacheRequests.java | 190 +++++++------- 19 files changed, 788 insertions(+), 797 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchedRemoteIterator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchedRemoteIterator.java index 42100d83e09..1e9398b45c2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchedRemoteIterator.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchedRemoteIterator.java @@ -28,13 +28,16 @@ public abstract class BatchedRemoteIterator implements RemoteIterator { public interface BatchedEntries { public E get(int i); public int size(); + public boolean hasMore(); } public static class BatchedListEntries implements BatchedEntries { private final List entries; + private final boolean hasMore; - public BatchedListEntries(List entries) { + public BatchedListEntries(List entries, boolean hasMore) { this.entries = entries; + this.hasMore = hasMore; } public E get(int i) { @@ -44,16 +47,18 @@ public E get(int i) { public int size() { return entries.size(); } + + public boolean hasMore() { + return hasMore; + } } private K prevKey; - private final int maxRepliesPerRequest; private BatchedEntries entries; private int idx; - public BatchedRemoteIterator(K prevKey, int maxRepliesPerRequest) { + public BatchedRemoteIterator(K prevKey) { this.prevKey = prevKey; - this.maxRepliesPerRequest = maxRepliesPerRequest; this.entries = null; this.idx = -1; } @@ -62,21 +67,14 @@ public BatchedRemoteIterator(K prevKey, int maxRepliesPerRequest) { * Perform the actual remote request. * * @param key The key to send. - * @param maxRepliesPerRequest The maximum number of replies to allow. 
* @return A list of replies. */ - public abstract BatchedEntries makeRequest(K prevKey, - int maxRepliesPerRequest) throws IOException; + public abstract BatchedEntries makeRequest(K prevKey) throws IOException; private void makeRequest() throws IOException { idx = 0; entries = null; - entries = makeRequest(prevKey, maxRepliesPerRequest); - if (entries.size() > maxRepliesPerRequest) { - throw new IOException("invalid number of replies returned: got " + - entries.size() + ", expected " + maxRepliesPerRequest + - " at most."); - } + entries = makeRequest(prevKey); if (entries.size() == 0) { entries = null; } @@ -86,7 +84,7 @@ private void makeRequestIfNeeded() throws IOException { if (idx == -1) { makeRequest(); } else if ((entries != null) && (idx >= entries.size())) { - if (entries.size() < maxRepliesPerRequest) { + if (!entries.hasMore()) { // Last time, we got fewer entries than requested. // So we should be at the end. entries = null; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java index 3db9acb2e22..9a5d9166894 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java @@ -303,6 +303,13 @@ public static FsPermission getFileDefault() { return new FsPermission((short)00666); } + /** + * Get the default permission for cache pools. + */ + public static FsPermission getCachePoolDefault() { + return new FsPermission((short)00755); + } + /** * Create a FsPermission from a Unix symbolic permission string * @param unixSymbolicPermission e.g. "-rw-rw-rw-" diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index 1594ffe0ea8..1d5305da5ee 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -1253,6 +1253,14 @@ public String getShortUserName() { return null; } + public String getPrimaryGroupName() throws IOException { + String[] groups = getGroupNames(); + if (groups.length == 0) { + throw new IOException("There is no primary group for UGI " + this); + } + return groups[0]; + } + /** * Get the user's full principal name. * @return the user's full principal name. diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index acc949680f4..97777dfb280 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -24,6 +24,9 @@ HDFS-4949 (Unreleased) HDFS-5121. Add RPCs for creating and manipulating cache pools. (Contributed by Colin Patrick McCabe) + HDFS-5163. Miscellaneous cache pool RPC fixes. 
(Contributed by Colin + Patrick McCabe) + OPTIMIZATIONS diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 9302fdd80ec..78293c22d69 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -195,6 +195,13 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY = "dfs.datanode.socket.reuse.keepalive"; public static final int DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT = 1000; + public static final String DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES = + "dfs.namenode.list.cache.pools.num.responses"; + public static final int DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT = 100; + public static final String DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES = + "dfs.namenode.list.cache.directives.num.responses"; + public static final int DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT = 100; + // Whether to enable datanode's stale state detection and usage for reads public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY = "dfs.namenode.avoid.read.stale.datanode"; public static final boolean DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT = false; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathCacheDirectiveException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathCacheDirectiveException.java index 0972302cd26..e162463d8d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathCacheDirectiveException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathCacheDirectiveException.java @@ -56,12 +56,12 @@ public InvalidPathNameError(PathCacheDirective directive) { } } - public static class InvalidPoolError + public static class InvalidPoolNameError extends AddPathCacheDirectiveException { private static final long serialVersionUID = 1L; - public InvalidPoolError(PathCacheDirective directive) { - super("invalid pool id " + directive.getPoolId(), directive); + public InvalidPoolNameError(PathCacheDirective directive) { + super("invalid pool name '" + directive.getPool() + "'", directive); } } @@ -70,7 +70,7 @@ public static class PoolWritePermissionDeniedError private static final long serialVersionUID = 1L; public PoolWritePermissionDeniedError(PathCacheDirective directive) { - super("write permission denied for pool id " + directive.getPoolId(), + super("write permission denied for pool '" + directive.getPool() + "'", directive); } } @@ -82,9 +82,7 @@ public static class UnexpectedAddPathCacheDirectiveException public UnexpectedAddPathCacheDirectiveException( PathCacheDirective directive) { super("encountered an unexpected error when trying to " + - "add path cache directive to pool id " + directive.getPoolId() + - " " + directive, - directive); + "add path cache directive " + directive, directive); } } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java index cf05816c7f6..6641cd29004 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java @@ -18,45 +18,38 @@ package org.apache.hadoop.hdfs.protocol; +import javax.annotation.Nullable; + import org.apache.commons.lang.builder.EqualsBuilder; import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.permission.FsPermission; -import com.google.common.base.Preconditions; - /** * Information about a cache pool. - * - * CachePoolInfo permissions roughly map to Unix file permissions. - * Write permissions allow addition and removal of a {@link PathCacheEntry} from - * the pool. Execute permissions allow listing of PathCacheEntries in a pool. - * Read permissions have no associated meaning. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class CachePoolInfo { + final String poolName; - private String poolName; - private String ownerName; - private String groupName; - private FsPermission mode; - private Integer weight; + @Nullable + String ownerName; - /** - * For Builder use - */ - private CachePoolInfo() {} + @Nullable + String groupName; + + @Nullable + FsPermission mode; + + @Nullable + Integer weight; - /** - * Use a CachePoolInfo {@link Builder} to create a new CachePoolInfo with - * more parameters - */ public CachePoolInfo(String poolName) { this.poolName = poolName; } - + public String getPoolName() { return poolName; } @@ -65,103 +58,73 @@ public String getOwnerName() { return ownerName; } + public CachePoolInfo setOwnerName(String ownerName) { + this.ownerName = ownerName; + return this; + } + public String getGroupName() { return groupName; } + public CachePoolInfo setGroupName(String groupName) { + this.groupName = groupName; + return this; + } + public FsPermission getMode() { return mode; } + public CachePoolInfo setMode(FsPermission mode) { + this.mode = mode; + return this; + } + public Integer getWeight() { return weight; } + public CachePoolInfo setWeight(Integer weight) { + this.weight = weight; + return this; + } + public String toString() { - return new StringBuilder(). - append("{ ").append("poolName:").append(poolName). - append(", ownerName:").append(ownerName). - append(", groupName:").append(groupName). - append(", mode:").append(mode). - append(", weight:").append(weight). - append(" }").toString(); + return new StringBuilder().append("{"). + append("poolName:").append(poolName). + append(", ownerName:").append(ownerName). + append(", groupName:").append(groupName). + append(", mode:").append((mode == null) ? "null" : + String.format("0%03o", mode)). + append(", weight:").append(weight). + append("}").toString(); + } + + @Override + public boolean equals(Object o) { + try { + CachePoolInfo other = (CachePoolInfo)o; + return new EqualsBuilder(). + append(poolName, other.poolName). + append(ownerName, other.ownerName). + append(groupName, other.groupName). + append(mode, other.mode). + append(weight, other.weight). + isEquals(); + } catch (ClassCastException e) { + return false; + } } @Override public int hashCode() { - return new HashCodeBuilder().append(poolName).append(ownerName) - .append(groupName).append(mode.toShort()).append(weight).hashCode(); + return new HashCodeBuilder(). + append(poolName). + append(ownerName). + append(groupName). + append(mode). + append(weight). 
+ hashCode(); } - - @Override - public boolean equals(Object obj) { - if (obj == null) { return false; } - if (obj == this) { return true; } - if (obj.getClass() != getClass()) { - return false; - } - CachePoolInfo rhs = (CachePoolInfo)obj; - return new EqualsBuilder() - .append(poolName, rhs.poolName) - .append(ownerName, rhs.ownerName) - .append(groupName, rhs.groupName) - .append(mode, rhs.mode) - .append(weight, rhs.weight) - .isEquals(); - } - - public static Builder newBuilder() { - return new Builder(); - } - - public static Builder newBuilder(CachePoolInfo info) { - return new Builder(info); - } - - /** - * CachePoolInfo Builder - */ - public static class Builder { - private CachePoolInfo info; - - public Builder() { - this.info = new CachePoolInfo(); - } - - public Builder(CachePoolInfo info) { - this.info = info; - } - - public CachePoolInfo build() { - Preconditions.checkNotNull(info.poolName, - "Cannot create a CachePoolInfo without a pool name"); - return info; - } - - public Builder setPoolName(String poolName) { - info.poolName = poolName; - return this; - } - - public Builder setOwnerName(String ownerName) { - info.ownerName = ownerName; - return this; - } - - public Builder setGroupName(String groupName) { - info.groupName = groupName; - return this; - } - - public Builder setMode(FsPermission mode) { - info.mode = mode; - return this; - } - - public Builder setWeight(Integer weight) { - info.weight = weight; - return this; - } - } - -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index df63b70256d..45b041ffb4a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -38,7 +38,6 @@ import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector; -import org.apache.hadoop.hdfs.server.namenode.CachePool; import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; import org.apache.hadoop.hdfs.server.namenode.SafeModeException; import org.apache.hadoop.io.EnumSetWritable; @@ -1107,8 +1106,9 @@ public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, * could not be added. */ @AtMostOnce - public List> addPathCacheDirectives( - List directives) throws IOException; + public List> + addPathCacheDirectives(List directives) + throws IOException; /** * Remove some path cache entries from the CacheManager. @@ -1117,7 +1117,7 @@ public List> addPathCacheDirectives( * @return A Fallible list where each element is either a successfully removed * ID, or an IOException describing why the ID could not be removed. */ - @Idempotent + @AtMostOnce public List> removePathCacheEntries(List ids) throws IOException; @@ -1127,15 +1127,13 @@ public List> removePathCacheEntries(List ids) * * @param prevId The last listed entry ID, or -1 if this is the first call to * listPathCacheEntries. 
- * @param pool The cache pool to list, or -1 to list all pools - * @param maxRepliesPerRequest The maximum number of entries to return per - * request + * @param pool The cache pool to list, or the empty string to list all pools * @return A RemoteIterator which returns PathCacheEntry objects. */ @Idempotent public RemoteIterator listPathCacheEntries(long prevId, - long poolId, int maxRepliesPerRequest) throws IOException; - + String pool) throws IOException; + /** * Add a new cache pool. * @@ -1143,39 +1141,37 @@ public RemoteIterator listPathCacheEntries(long prevId, * @throws IOException If the request could not be completed. */ @AtMostOnce - public CachePool addCachePool(CachePoolInfo info) throws IOException; + public void addCachePool(CachePoolInfo info) throws IOException; /** - * Modify a cache pool, e.g. pool name, permissions, owner, group. - * - * @param poolId ID of the cache pool to modify - * @param info New metadata for the cache pool - * @throws IOException If the request could not be completed. + * Modify a cache pool. + * + * @param req + * The request to modify a cache pool. + * @throws IOException + * If the request could not be completed. */ @AtMostOnce - public void modifyCachePool(long poolId, CachePoolInfo info) - throws IOException; - + public void modifyCachePool(CachePoolInfo req) throws IOException; + /** * Remove a cache pool. * - * @param poolId ID of the cache pool to remove. + * @param pool name of the cache pool to remove. * @throws IOException if the cache pool did not exist, or could not be * removed. */ - @Idempotent - public void removeCachePool(long poolId) throws IOException; + @AtMostOnce + public void removeCachePool(String pool) throws IOException; /** * List the set of cache pools. Incrementally fetches results from the server. * - * @param prevPoolId ID of the last pool listed, or -1 if this is the first - * invocation of listCachePools - * @param maxRepliesPerRequest Maximum number of cache pools to return per - * server request. + * @param prevPool name of the last pool listed, or the empty string if this is + * the first invocation of listCachePools * @return A RemoteIterator which returns CachePool objects. 
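  /*
   * Sketch, not part of the patch: both listing calls above page through
   * results on the client's behalf via a RemoteIterator. "namenode" is an
   * assumed ClientProtocol proxy; -1 and the empty string mean "start from
   * the beginning", and an empty pool filter lists entries from every pool.
   */
  static void dumpCacheState(ClientProtocol namenode) throws IOException {
    RemoteIterator<CachePoolInfo> pools = namenode.listCachePools("");
    while (pools.hasNext()) {
      System.out.println("pool: " + pools.next().getPoolName());
    }
    RemoteIterator<PathCacheEntry> entries =
        namenode.listPathCacheEntries(-1, "");
    while (entries.hasNext()) {
      PathCacheEntry entry = entries.next();
      System.out.println(entry.getEntryId() + ": " +
          entry.getDirective().getPath());
    }
  }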
*/ @Idempotent - public RemoteIterator listCachePools(long prevPoolId, - int maxRepliesPerRequest) throws IOException; + public RemoteIterator listCachePools(String prevPool) + throws IOException; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheDirective.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheDirective.java index cab8dc45f28..8c6d742d4cd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheDirective.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheDirective.java @@ -25,7 +25,7 @@ import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError; /** @@ -33,13 +33,14 @@ */ public class PathCacheDirective implements Comparable { private final String path; - private final long poolId; - public PathCacheDirective(String path, long poolId) { + private final String pool; + + public PathCacheDirective(String path, String pool) { Preconditions.checkNotNull(path); - Preconditions.checkArgument(poolId > 0); + Preconditions.checkNotNull(pool); this.path = path; - this.poolId = poolId; + this.pool = pool; } /** @@ -52,8 +53,8 @@ public String getPath() { /** * @return The pool used in this request. */ - public long getPoolId() { - return poolId; + public String getPool() { + return pool; } /** @@ -69,22 +70,22 @@ public void validate() throws IOException { if (!DFSUtil.isValidName(path)) { throw new InvalidPathNameError(this); } - if (poolId <= 0) { - throw new InvalidPoolError(this); + if (pool.isEmpty()) { + throw new InvalidPoolNameError(this); } } @Override public int compareTo(PathCacheDirective rhs) { return ComparisonChain.start(). - compare(poolId, rhs.getPoolId()). + compare(pool, rhs.getPool()). compare(path, rhs.getPath()). result(); } @Override public int hashCode() { - return new HashCodeBuilder().append(path).append(poolId).hashCode(); + return new HashCodeBuilder().append(path).append(pool).hashCode(); } @Override @@ -101,7 +102,7 @@ public boolean equals(Object o) { public String toString() { StringBuilder builder = new StringBuilder(); builder.append("{ path:").append(path). - append(", poolId:").append(poolId). + append(", pool:").append(pool). 
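  /*
   * Sketch, not part of the patch: directives are now keyed by pool name
   * rather than pool id. The path and pool below are example values;
   * validate() rejects empty or invalid paths and empty pool names with the
   * typed errors shown above.
   */
  static PathCacheDirective exampleDirective() throws IOException {
    PathCacheDirective directive =
        new PathCacheDirective("/warehouse/table1", "pool1");
    directive.validate();
    return directive;
  }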
append(" }"); return builder.toString(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index d31162497bf..857b36faa51 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -27,9 +27,11 @@ import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DirectoryListing; @@ -112,6 +114,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesElementProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto; @@ -171,6 +174,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.namenode.CachePool; import org.apache.hadoop.hdfs.server.namenode.INodeId; +import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto; @@ -1035,19 +1039,16 @@ public IsFileClosedResponseProto isFileClosed( } @Override - public AddPathCacheDirectivesResponseProto addPathCacheDirectives( - RpcController controller, AddPathCacheDirectivesRequestProto request) - throws ServiceException { + public AddPathCacheDirectivesResponseProto addPathCacheDirectives(RpcController controller, + AddPathCacheDirectivesRequestProto request) throws ServiceException { try { ArrayList input = new ArrayList(request.getElementsCount()); for (int i = 0; i < request.getElementsCount(); i++) { PathCacheDirectiveProto proto = request.getElements(i); - input.add(new PathCacheDirective(proto.getPath(), - proto.getPool().getId())); + input.add(new PathCacheDirective(proto.getPath(), proto.getPool())); } - List> output = server - .addPathCacheDirectives(input); + List> output = server.addPathCacheDirectives(input); 
AddPathCacheDirectivesResponseProto.Builder builder = AddPathCacheDirectivesResponseProto.newBuilder(); for (int idx = 0; idx < output.size(); idx++) { @@ -1060,7 +1061,7 @@ public AddPathCacheDirectivesResponseProto addPathCacheDirectives( } catch (InvalidPathNameError ioe) { builder.addResults(AddPathCacheDirectiveErrorProto. INVALID_PATH_NAME_ERROR_VALUE); - } catch (InvalidPoolError ioe) { + } catch (InvalidPoolNameError ioe) { builder.addResults(AddPathCacheDirectiveErrorProto. INVALID_POOL_NAME_ERROR_VALUE); } catch (IOException ioe) { @@ -1108,21 +1109,20 @@ public RemovePathCacheEntriesResponseProto removePathCacheEntries( } @Override - public ListPathCacheEntriesResponseProto listPathCacheEntries( - RpcController controller, ListPathCacheEntriesRequestProto request) - throws ServiceException { + public ListPathCacheEntriesResponseProto listPathCacheEntries(RpcController controller, + ListPathCacheEntriesRequestProto request) throws ServiceException { try { - CachePool pool = PBHelper.convert(request.getPool()); RemoteIterator iter = - server.listPathCacheEntries( - PBHelper.convert(request.getPrevEntry()).getEntryId(), - pool.getId(), - request.getMaxReplies()); + server.listPathCacheEntries(request.getPrevId(), request.getPool()); ListPathCacheEntriesResponseProto.Builder builder = ListPathCacheEntriesResponseProto.newBuilder(); while (iter.hasNext()) { PathCacheEntry entry = iter.next(); - builder.addEntries(PBHelper.convert(entry)); + builder.addElements( + ListPathCacheEntriesElementProto.newBuilder(). + setId(entry.getEntryId()). + setPath(entry.getDirective().getPath()). + setPool(entry.getDirective().getPool())); } return builder.build(); } catch (IOException e) { @@ -1134,20 +1134,46 @@ public ListPathCacheEntriesResponseProto listPathCacheEntries( public AddCachePoolResponseProto addCachePool(RpcController controller, AddCachePoolRequestProto request) throws ServiceException { try { - server.addCachePool(PBHelper.convert(request.getInfo())); + CachePoolInfo info = + new CachePoolInfo(request.getPoolName()); + if (request.hasOwnerName()) { + info.setOwnerName(request.getOwnerName()); + } + if (request.hasGroupName()) { + info.setGroupName(request.getGroupName()); + } + if (request.hasMode()) { + info.setMode(new FsPermission((short)request.getMode())); + } + if (request.hasWeight()) { + info.setWeight(request.getWeight()); + } + server.addCachePool(info); return AddCachePoolResponseProto.newBuilder().build(); } catch (IOException e) { throw new ServiceException(e); } } - + @Override public ModifyCachePoolResponseProto modifyCachePool(RpcController controller, ModifyCachePoolRequestProto request) throws ServiceException { try { - server.modifyCachePool( - PBHelper.convert(request.getPool()).getId(), - PBHelper.convert(request.getInfo())); + CachePoolInfo info = + new CachePoolInfo(request.getPoolName()); + if (request.hasOwnerName()) { + info.setOwnerName(request.getOwnerName()); + } + if (request.hasGroupName()) { + info.setGroupName(request.getGroupName()); + } + if (request.hasMode()) { + info.setMode(new FsPermission((short)request.getMode())); + } + if (request.hasWeight()) { + info.setWeight(request.getWeight()); + } + server.modifyCachePool(info); return ModifyCachePoolResponseProto.newBuilder().build(); } catch (IOException e) { throw new ServiceException(e); @@ -1158,7 +1184,7 @@ public ModifyCachePoolResponseProto modifyCachePool(RpcController controller, public RemoveCachePoolResponseProto removeCachePool(RpcController controller, RemoveCachePoolRequestProto 
request) throws ServiceException { try { - server.removeCachePool(PBHelper.convert(request.getPool()).getId()); + server.removeCachePool(request.getPoolName()); return RemoveCachePoolResponseProto.newBuilder().build(); } catch (IOException e) { throw new ServiceException(e); @@ -1169,16 +1195,27 @@ public RemoveCachePoolResponseProto removeCachePool(RpcController controller, public ListCachePoolsResponseProto listCachePools(RpcController controller, ListCachePoolsRequestProto request) throws ServiceException { try { - RemoteIterator iter = - server.listCachePools(PBHelper.convert(request.getPrevPool()).getId(), - request.getMaxReplies()); + RemoteIterator iter = + server.listCachePools(request.getPrevPoolName()); ListCachePoolsResponseProto.Builder responseBuilder = ListCachePoolsResponseProto.newBuilder(); while (iter.hasNext()) { - CachePool pool = iter.next(); - ListCachePoolsResponseElementProto.Builder elemBuilder = + CachePoolInfo pool = iter.next(); + ListCachePoolsResponseElementProto.Builder elemBuilder = ListCachePoolsResponseElementProto.newBuilder(); - elemBuilder.setPool(PBHelper.convert(pool)); + elemBuilder.setPoolName(pool.getPoolName()); + if (pool.getOwnerName() != null) { + elemBuilder.setOwnerName(pool.getOwnerName()); + } + if (pool.getGroupName() != null) { + elemBuilder.setGroupName(pool.getGroupName()); + } + if (pool.getMode() != null) { + elemBuilder.setMode(pool.getMode().toShort()); + } + if (pool.getWeight() != null) { + elemBuilder.setWeight(pool.getWeight()); + } responseBuilder.addElements(elemBuilder.build()); } return responseBuilder.build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 9005cc28e98..53912f7a14f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -23,6 +23,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.NoSuchElementException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -37,12 +38,17 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; +import org.apache.hadoop.hdfs.protocol.PathCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathCacheEntry; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.UnexpectedAddPathCacheDirectiveException; -import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; -import org.apache.hadoop.hdfs.protocol.CachePoolInfo; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; +import 
org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; @@ -55,18 +61,14 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; -import org.apache.hadoop.hdfs.protocol.PathCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathCacheEntry; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectiveErrorProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectivesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectivesResponseProto; @@ -107,23 +109,23 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesElementProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesResponseProto; import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheEntryProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntryErrorProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto; @@ -144,7 +146,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; -import org.apache.hadoop.hdfs.server.namenode.CachePool; import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; import org.apache.hadoop.hdfs.server.namenode.SafeModeException; import org.apache.hadoop.io.EnumSetWritable; @@ -1026,7 +1027,7 @@ private static IOException addPathCacheDirectivesError(long code, return new InvalidPathNameError(directive); } else if (code == AddPathCacheDirectiveErrorProto. INVALID_POOL_NAME_ERROR_VALUE) { - return new InvalidPoolError(directive); + return new InvalidPoolNameError(directive); } else { return new UnexpectedAddPathCacheDirectiveException(directive); } @@ -1041,7 +1042,7 @@ public List> addPathCacheDirectives( for (PathCacheDirective directive : directives) { builder.addElements(PathCacheDirectiveProto.newBuilder(). setPath(directive.getPath()). - setPool(PBHelper.convert(new CachePool(directive.getPoolId()))). + setPool(directive.getPool()). 
build()); } AddPathCacheDirectivesResponseProto result = @@ -1120,40 +1121,45 @@ private static class BatchedPathCacheEntries @Override public PathCacheEntry get(int i) { - PathCacheEntryProto entryProto = response.getEntries(i); - return PBHelper.convert(entryProto); + ListPathCacheEntriesElementProto elementProto = + response.getElements(i); + return new PathCacheEntry(elementProto.getId(), + new PathCacheDirective(elementProto.getPath(), + elementProto.getPool())); } @Override public int size() { - return response.getEntriesCount(); + return response.getElementsCount(); + } + + @Override + public boolean hasMore() { + return response.getHasMore(); } } private class PathCacheEntriesIterator extends BatchedRemoteIterator { - private final long poolId; + private final String pool; - public PathCacheEntriesIterator(long prevKey, int maxRepliesPerRequest, - long poolId) { - super(prevKey, maxRepliesPerRequest); - this.poolId = poolId; + public PathCacheEntriesIterator(long prevKey, String pool) { + super(prevKey); + this.pool = pool; } @Override public BatchedEntries makeRequest( - Long prevEntryId, int maxRepliesPerRequest) throws IOException { + Long nextKey) throws IOException { ListPathCacheEntriesResponseProto response; try { ListPathCacheEntriesRequestProto req = ListPathCacheEntriesRequestProto.newBuilder(). - setPrevEntry( - PBHelper.convert(new PathCacheEntry(prevEntryId, null))). - setPool(PBHelper.convert(new CachePool(poolId))). - setMaxReplies(maxRepliesPerRequest). + setPrevId(nextKey). + setPool(pool). build(); response = rpcProxy.listPathCacheEntries(null, req); - if (response.getEntriesCount() == 0) { + if (response.getElementsCount() == 0) { response = null; } } catch (ServiceException e) { @@ -1170,30 +1176,51 @@ public Long elementToPrevKey(PathCacheEntry element) { @Override public RemoteIterator listPathCacheEntries(long prevId, - long poolId, int repliesPerRequest) throws IOException { - return new PathCacheEntriesIterator(prevId, repliesPerRequest, poolId); + String pool) throws IOException { + return new PathCacheEntriesIterator(prevId, pool); } @Override - public CachePool addCachePool(CachePoolInfo info) throws IOException { - AddCachePoolRequestProto.Builder builder = + public void addCachePool(CachePoolInfo info) throws IOException { + AddCachePoolRequestProto.Builder builder = AddCachePoolRequestProto.newBuilder(); - builder.setInfo(PBHelper.convert(info)); + builder.setPoolName(info.getPoolName()); + if (info.getOwnerName() != null) { + builder.setOwnerName(info.getOwnerName()); + } + if (info.getGroupName() != null) { + builder.setGroupName(info.getGroupName()); + } + if (info.getMode() != null) { + builder.setMode(info.getMode().toShort()); + } + if (info.getWeight() != null) { + builder.setWeight(info.getWeight()); + } try { - return PBHelper.convert( - rpcProxy.addCachePool(null, builder.build()).getPool()); + rpcProxy.addCachePool(null, builder.build()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } @Override - public void modifyCachePool(long poolId, CachePoolInfo info) - throws IOException { - ModifyCachePoolRequestProto.Builder builder = - ModifyCachePoolRequestProto.newBuilder() - .setPool(PBHelper.convert(new CachePool(poolId))) - .setInfo(PBHelper.convert(info)); + public void modifyCachePool(CachePoolInfo req) throws IOException { + ModifyCachePoolRequestProto.Builder builder = + ModifyCachePoolRequestProto.newBuilder(); + builder.setPoolName(req.getPoolName()); + if (req.getOwnerName() != null) { + 
builder.setOwnerName(req.getOwnerName()); + } + if (req.getGroupName() != null) { + builder.setGroupName(req.getGroupName()); + } + if (req.getMode() != null) { + builder.setMode(req.getMode().toShort()); + } + if (req.getWeight() != null) { + builder.setWeight(req.getWeight()); + } try { rpcProxy.modifyCachePool(null, builder.build()); } catch (ServiceException e) { @@ -1202,69 +1229,74 @@ public void modifyCachePool(long poolId, CachePoolInfo info) } @Override - public void removeCachePool(long poolId) throws IOException { + public void removeCachePool(String cachePoolName) throws IOException { try { - rpcProxy.removeCachePool(null, + rpcProxy.removeCachePool(null, RemoveCachePoolRequestProto.newBuilder(). - setPool(PBHelper.convert(new CachePool(poolId))). - build()); + setPoolName(cachePoolName).build()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } private static class BatchedPathDirectiveEntries - implements BatchedEntries { - + implements BatchedEntries { private final ListCachePoolsResponseProto proto; - + public BatchedPathDirectiveEntries(ListCachePoolsResponseProto proto) { this.proto = proto; } - + @Override - public CachePool get(int i) { + public CachePoolInfo get(int i) { ListCachePoolsResponseElementProto elem = proto.getElements(i); - return PBHelper.convert(elem.getPool()); + return new CachePoolInfo(elem.getPoolName()). + setOwnerName(elem.getOwnerName()). + setGroupName(elem.getGroupName()). + setMode(new FsPermission((short)elem.getMode())). + setWeight(elem.getWeight()); } @Override public int size() { return proto.getElementsCount(); } + + @Override + public boolean hasMore() { + return proto.getHasMore(); + } } + + private class CachePoolIterator + extends BatchedRemoteIterator { - private class CachePoolIterator - extends BatchedRemoteIterator { - - public CachePoolIterator(Long prevKey, int maxRepliesPerRequest) { - super(prevKey, maxRepliesPerRequest); + public CachePoolIterator(String prevKey) { + super(prevKey); } @Override - public BatchedEntries makeRequest(Long prevKey, - int maxRepliesPerRequest) throws IOException { + public BatchedEntries makeRequest(String prevKey) + throws IOException { try { return new BatchedPathDirectiveEntries( - rpcProxy.listCachePools(null, + rpcProxy.listCachePools(null, ListCachePoolsRequestProto.newBuilder(). - setPrevPool(PBHelper.convert(new CachePool(prevKey))). - setMaxReplies(maxRepliesPerRequest). 
- build())); + setPrevPoolName(prevKey).build())); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } @Override - public Long elementToPrevKey(CachePool element) { - return element.getId(); + public String elementToPrevKey(CachePoolInfo element) { + return element.getPoolName(); } } @Override - public RemoteIterator listCachePools(long prevPoolId, - int maxRepliesPerRequest) throws IOException { - return new CachePoolIterator(prevPoolId, maxRepliesPerRequest); + public RemoteIterator listCachePools(String prevKey) + throws IOException { + return new CachePoolIterator(prevKey); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 862527a0130..4051d01e031 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -32,13 +32,10 @@ import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.PathCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathCacheEntry; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; import org.apache.hadoop.hdfs.protocol.DirectoryListing; @@ -53,15 +50,9 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheEntryProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto; @@ -123,7 +114,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.StorageInfo; -import org.apache.hadoop.hdfs.server.namenode.CachePool; import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature; import 
org.apache.hadoop.hdfs.server.namenode.INodeId; import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand; @@ -1503,74 +1493,6 @@ public static HdfsProtos.ChecksumTypeProto convert(DataChecksum.Type type) { return HdfsProtos.ChecksumTypeProto.valueOf(type.id); } - public static PathCacheDirective convert( - PathCacheDirectiveProto directiveProto) { - CachePool pool = convert(directiveProto.getPool()); - return new PathCacheDirective(directiveProto.getPath(), pool.getId()); - } - - public static PathCacheDirectiveProto convert(PathCacheDirective directive) { - PathCacheDirectiveProto.Builder builder = - PathCacheDirectiveProto.newBuilder() - .setPath(directive.getPath()) - .setPool(PBHelper.convert(new CachePool(directive.getPoolId()))); - return builder.build(); - } - - public static PathCacheEntry convert(PathCacheEntryProto entryProto) { - long entryId = entryProto.getId(); - PathCacheDirective directive = convert(entryProto.getDirective()); - return new PathCacheEntry(entryId, directive); - } - - public static PathCacheEntryProto convert(PathCacheEntry entry) { - PathCacheEntryProto.Builder builder = PathCacheEntryProto.newBuilder() - .setId(entry.getEntryId()) - .setDirective(PBHelper.convert(entry.getDirective())); - return builder.build(); - } - - public static CachePoolInfo convert(CachePoolInfoProto infoProto) { - CachePoolInfo.Builder builder = - CachePoolInfo.newBuilder().setPoolName(infoProto.getPoolName()); - if (infoProto.hasOwnerName()) { - builder.setOwnerName(infoProto.getOwnerName()); - } - if (infoProto.hasGroupName()) { - builder.setGroupName(infoProto.getGroupName()); - } - if (infoProto.hasMode()) { - builder.setMode(new FsPermission((short) infoProto.getMode())); - } - if (infoProto.hasWeight()) { - builder.setWeight(infoProto.getWeight()); - } - return builder.build(); - } - - public static CachePoolInfoProto convert(CachePoolInfo info) { - CachePoolInfoProto.Builder builder = CachePoolInfoProto.newBuilder() - .setPoolName(info.getPoolName()) - .setOwnerName(info.getOwnerName()) - .setGroupName(info.getGroupName()) - .setMode(info.getMode().toShort()) - .setWeight(info.getWeight()); - return builder.build(); - } - - public static CachePool convert(CachePoolProto poolProto) { - CachePoolInfo info = convert(poolProto.getInfo()); - CachePool pool = new CachePool(poolProto.getId(), info); - return pool; - } - - public static CachePoolProto convert(CachePool pool) { - CachePoolProto.Builder builder = CachePoolProto.newBuilder() - .setId(pool.getId()) - .setInfo(convert(pool.getInfo())); - return builder.build(); - } - public static InputStream vintPrefixed(final InputStream input) throws IOException { final int firstByte = input.read(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index 83834d967e9..53499c86a69 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -17,28 +17,34 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT; +import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT; + import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; import java.util.List; -import java.util.Map; import java.util.SortedMap; import java.util.TreeMap; +import java.util.Map.Entry; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.PoolWritePermissionDeniedError; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.UnexpectedAddPathCacheDirectiveException; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.PathCacheDirective; import org.apache.hadoop.hdfs.protocol.PathCacheEntry; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.UnexpectedAddPathCacheDirectiveException; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.PoolWritePermissionDeniedError; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException; import org.apache.hadoop.util.Fallible; /** @@ -65,62 +71,58 @@ final class CacheManager { /** * Cache pools, sorted by name. */ - private final TreeMap cachePoolsByName = + private final TreeMap cachePools = new TreeMap(); - /** - * Cache pools, sorted by ID - */ - private final TreeMap cachePoolsById = - new TreeMap(); - /** * The entry ID to use for a new entry. */ private long nextEntryId; /** - * The pool ID to use for a new pool. + * Maximum number of cache pools to list in one operation. */ - private long nextPoolId; + private final int maxListCachePoolsResponses; + + /** + * Maximum number of cache pool directives to list in one operation. 
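  /*
   * Sketch, not part of the patch: the two listing batch sizes above are
   * ordinary int settings on the NameNode configuration, so a test could
   * shrink them to exercise the "hasMore" paging path. The value 2 is only
   * an example; the defaults come from DFSConfigKeys.
   */
  static Configuration smallBatchConf() {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, 2);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, 2);
    return conf;
  }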
+ */ + private final int maxListCacheDirectivesResponses; CacheManager(FSDirectory dir, Configuration conf) { // TODO: support loading and storing of the CacheManager state clear(); + maxListCachePoolsResponses = conf.getInt( + DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, + DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT); + maxListCacheDirectivesResponses = conf.getInt( + DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, + DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT); } synchronized void clear() { entriesById.clear(); entriesByDirective.clear(); - cachePoolsByName.clear(); - cachePoolsById.clear(); + cachePools.clear(); nextEntryId = 1; - nextPoolId = 1; } synchronized long getNextEntryId() throws IOException { if (nextEntryId == Long.MAX_VALUE) { - throw new IOException("no more available entry IDs"); + throw new IOException("no more available IDs"); } return nextEntryId++; } - synchronized long getNextPoolId() throws IOException { - if (nextPoolId == Long.MAX_VALUE) { - throw new IOException("no more available pool IDs"); - } - return nextPoolId++; - } - private synchronized Fallible addDirective( - FSPermissionChecker pc, PathCacheDirective directive) { - CachePool pool = cachePoolsById.get(directive.getPoolId()); + PathCacheDirective directive, FSPermissionChecker pc) { + CachePool pool = cachePools.get(directive.getPool()); if (pool == null) { LOG.info("addDirective " + directive + ": pool not found."); return new Fallible( - new InvalidPoolError(directive)); + new InvalidPoolNameError(directive)); } - if (!pc.checkPermission(pool, FsAction.WRITE)) { + if ((pc != null) && (!pc.checkPermission(pool, FsAction.WRITE))) { LOG.info("addDirective " + directive + ": write permission denied."); return new Fallible( new PoolWritePermissionDeniedError(directive)); @@ -155,17 +157,17 @@ private synchronized Fallible addDirective( } public synchronized List> addDirectives( - FSPermissionChecker pc, List directives) { + List directives, FSPermissionChecker pc) { ArrayList> results = new ArrayList>(directives.size()); for (PathCacheDirective directive: directives) { - results.add(addDirective(pc, directive)); + results.add(addDirective(directive, pc)); } return results; } - private synchronized Fallible removeEntry(FSPermissionChecker pc, - long entryId) { + private synchronized Fallible removeEntry(long entryId, + FSPermissionChecker pc) { // Check for invalid IDs. if (entryId <= 0) { LOG.info("removeEntry " + entryId + ": invalid non-positive entry ID."); @@ -177,20 +179,20 @@ private synchronized Fallible removeEntry(FSPermissionChecker pc, LOG.info("removeEntry " + entryId + ": entry not found."); return new Fallible(new NoSuchIdException(entryId)); } - CachePool pool = cachePoolsById.get(existing.getDirective().getPoolId()); + CachePool pool = cachePools.get(existing.getDirective().getPool()); if (pool == null) { LOG.info("removeEntry " + entryId + ": pool not found for directive " + existing.getDirective()); return new Fallible( new UnexpectedRemovePathCacheEntryException(entryId)); } - if (!pc.checkPermission(pool, FsAction.WRITE)) { + if ((pc != null) && (!pc.checkPermission(pool, FsAction.WRITE))) { LOG.info("removeEntry " + entryId + ": write permission denied to " + "pool " + pool + " for entry " + existing); return new Fallible( new RemovePermissionDeniedException(entryId)); } - + // Remove the corresponding entry in entriesByDirective. 
if (entriesByDirective.remove(existing.getDirective()) == null) { LOG.warn("removeEntry " + entryId + ": failed to find existing entry " + @@ -202,41 +204,43 @@ private synchronized Fallible removeEntry(FSPermissionChecker pc, return new Fallible(entryId); } - public synchronized List> removeEntries(FSPermissionChecker pc, - List entryIds) { + public synchronized List> removeEntries(List entryIds, + FSPermissionChecker pc) { ArrayList> results = new ArrayList>(entryIds.size()); for (Long entryId : entryIds) { - results.add(removeEntry(pc, entryId)); + results.add(removeEntry(entryId, pc)); } return results; } - public synchronized List listPathCacheEntries( - FSPermissionChecker pc, long prevId, Long poolId, int maxReplies) { - final int MAX_PRE_ALLOCATED_ENTRIES = 16; - ArrayList replies = new ArrayList( - Math.min(MAX_PRE_ALLOCATED_ENTRIES, maxReplies)); + public synchronized BatchedListEntries + listPathCacheEntries(long prevId, String filterPool, FSPermissionChecker pc) { + final int NUM_PRE_ALLOCATED_ENTRIES = 16; + ArrayList replies = + new ArrayList(NUM_PRE_ALLOCATED_ENTRIES); int numReplies = 0; SortedMap tailMap = entriesById.tailMap(prevId + 1); - for (PathCacheEntry entry : tailMap.values()) { - if (numReplies >= maxReplies) { - return replies; + for (Entry cur : tailMap.entrySet()) { + if (numReplies >= maxListCacheDirectivesResponses) { + return new BatchedListEntries(replies, true); } - long entryPoolId = entry.getDirective().getPoolId(); - if (poolId == null || poolId <= 0 || entryPoolId == poolId) { - if (pc.checkPermission( - cachePoolsById.get(entryPoolId), FsAction.EXECUTE)) { - replies.add(entry); - numReplies++; - } + PathCacheEntry curEntry = cur.getValue(); + if (!filterPool.isEmpty() && + !cur.getValue().getDirective().getPool().equals(filterPool)) { + continue; + } + CachePool pool = cachePools.get(curEntry.getDirective().getPool()); + if (pool == null) { + LOG.error("invalid pool for PathCacheEntry " + curEntry); + continue; + } + if (pc.checkPermission(pool, FsAction.EXECUTE)) { + replies.add(cur.getValue()); + numReplies++; } } - return replies; - } - - synchronized CachePool getCachePool(long id) { - return cachePoolsById.get(id); + return new BatchedListEntries(replies, false); } /** @@ -246,24 +250,22 @@ synchronized CachePool getCachePool(long id) { * * @param info * The info for the cache pool to create. - * @return created CachePool */ - public synchronized CachePool addCachePool(CachePoolInfo info) + public synchronized void addCachePool(CachePoolInfo info) throws IOException { String poolName = info.getPoolName(); - if (poolName == null || poolName.isEmpty()) { + if (poolName.isEmpty()) { throw new IOException("invalid empty cache pool name"); } - if (cachePoolsByName.containsKey(poolName)) { + CachePool pool = cachePools.get(poolName); + if (pool != null) { throw new IOException("cache pool " + poolName + " already exists."); } - CachePool cachePool = new CachePool(getNextPoolId(), poolName, + CachePool cachePool = new CachePool(poolName, info.getOwnerName(), info.getGroupName(), info.getMode(), info.getWeight()); - cachePoolsById.put(cachePool.getId(), cachePool); - cachePoolsByName.put(poolName, cachePool); + cachePools.put(poolName, cachePool); LOG.info("created new cache pool " + cachePool); - return cachePool; } /** @@ -274,62 +276,46 @@ public synchronized CachePool addCachePool(CachePoolInfo info) * @param info * The info for the cache pool to modify. 
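  /*
   * Sketch, not part of the patch: modifyCachePool applies only the non-null
   * fields of the CachePoolInfo, so a single attribute can be changed without
   * restating the rest. "namenode" is an assumed ClientProtocol proxy and the
   * pool name, mode, and weight are example values.
   */
  static void tightenPool(ClientProtocol namenode) throws IOException {
    namenode.modifyCachePool(new CachePoolInfo("pool1").
        setMode(new FsPermission((short)0750)).
        setWeight(200));
  }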
*/ - public synchronized void modifyCachePool(long poolId, CachePoolInfo info) + public synchronized void modifyCachePool(CachePoolInfo info) throws IOException { - if (poolId <= 0) { - throw new IOException("invalid pool id " + poolId); + String poolName = info.getPoolName(); + if (poolName.isEmpty()) { + throw new IOException("invalid empty cache pool name"); } - if (!cachePoolsById.containsKey(poolId)) { - throw new IOException("cache pool id " + poolId + " does not exist."); + CachePool pool = cachePools.get(poolName); + if (pool == null) { + throw new IOException("cache pool " + poolName + " does not exist."); } - CachePool pool = cachePoolsById.get(poolId); - // Remove the old CachePoolInfo - removeCachePool(poolId); - // Build up the new CachePoolInfo - CachePoolInfo.Builder newInfo = CachePoolInfo.newBuilder(pool.getInfo()); StringBuilder bld = new StringBuilder(); String prefix = ""; - if (info.getPoolName() != null) { - newInfo.setPoolName(info.getPoolName()); - bld.append(prefix). - append("set name to ").append(info.getOwnerName()); - prefix = "; "; - } if (info.getOwnerName() != null) { - newInfo.setOwnerName(info.getOwnerName()); + pool.setOwnerName(info.getOwnerName()); bld.append(prefix). append("set owner to ").append(info.getOwnerName()); prefix = "; "; } if (info.getGroupName() != null) { - newInfo.setGroupName(info.getGroupName()); + pool.setGroupName(info.getGroupName()); bld.append(prefix). append("set group to ").append(info.getGroupName()); prefix = "; "; } if (info.getMode() != null) { - newInfo.setMode(info.getMode()); + pool.setMode(info.getMode()); bld.append(prefix). - append(String.format("set mode to ", info.getMode())); + append(String.format("set mode to 0%3o", info.getMode())); prefix = "; "; } if (info.getWeight() != null) { - newInfo.setWeight(info.getWeight()); + pool.setWeight(info.getWeight()); bld.append(prefix). append("set weight to ").append(info.getWeight()); prefix = "; "; } if (prefix.isEmpty()) { bld.append("no changes."); - } else { - pool.setInfo(newInfo.build()); } - // Put the newly modified info back in - cachePoolsById.put(poolId, pool); - cachePoolsByName.put(info.getPoolName(), pool); - LOG.info("modified pool id " + pool.getId() - + " (" + pool.getInfo().getPoolName() + "); " - + bld.toString()); + LOG.info("modified " + poolName + "; " + bld.toString()); } /** @@ -337,39 +323,47 @@ public synchronized void modifyCachePool(long poolId, CachePoolInfo info) * * Only the superuser should be able to call this function. * - * @param poolId - * The id of the cache pool to remove. + * @param poolName + * The name for the cache pool to remove. 
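  /*
   * Sketch, not part of the patch: pools are now removed by name, and as the
   * CacheManager code below shows, removing a pool also drops any path cache
   * entries that still reference it. "namenode" is an assumed ClientProtocol
   * proxy and "pool1" an example name.
   */
  static void dropPool(ClientProtocol namenode) throws IOException {
    namenode.removeCachePool("pool1");
  }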
*/ - public synchronized void removeCachePool(long poolId) throws IOException { - if (!cachePoolsById.containsKey(poolId)) { - throw new IOException("can't remove nonexistent cache pool id " + poolId); + public synchronized void removeCachePool(String poolName) + throws IOException { + CachePool pool = cachePools.remove(poolName); + if (pool == null) { + throw new IOException("can't remove nonexistent cache pool " + poolName); } - // Remove all the entries associated with the pool - Iterator> it = - entriesById.entrySet().iterator(); - while (it.hasNext()) { - Map.Entry entry = it.next(); - if (entry.getValue().getDirective().getPoolId() == poolId) { - it.remove(); - entriesByDirective.remove(entry.getValue().getDirective()); + + // Remove entries using this pool + // TODO: could optimize this somewhat to avoid the need to iterate + // over all entries in entriesByDirective + Iterator> iter = + entriesByDirective.entrySet().iterator(); + while (iter.hasNext()) { + Entry entry = iter.next(); + if (entry.getKey().getPool().equals(poolName)) { + entriesById.remove(entry.getValue().getEntryId()); + iter.remove(); } } - // Remove the pool - CachePool pool = cachePoolsById.remove(poolId); - cachePoolsByName.remove(pool.getInfo().getPoolName()); } - public synchronized List listCachePools(Long prevKey, - int maxRepliesPerRequest) { - final int MAX_PREALLOCATED_REPLIES = 16; - ArrayList results = - new ArrayList(Math.min(MAX_PREALLOCATED_REPLIES, - maxRepliesPerRequest)); - SortedMap tailMap = - cachePoolsById.tailMap(prevKey, false); - for (CachePool pool : tailMap.values()) { - results.add(pool); + public synchronized BatchedListEntries + listCachePools(FSPermissionChecker pc, String prevKey) { + final int NUM_PRE_ALLOCATED_ENTRIES = 16; + ArrayList results = + new ArrayList(NUM_PRE_ALLOCATED_ENTRIES); + SortedMap tailMap = cachePools.tailMap(prevKey, false); + int numListed = 0; + for (Entry cur : tailMap.entrySet()) { + if (numListed++ >= maxListCachePoolsResponses) { + return new BatchedListEntries(results, true); + } + if (pc == null) { + results.add(cur.getValue().getInfo(true)); + } else { + results.add(cur.getValue().getInfo(pc)); + } } - return results; + return new BatchedListEntries(results, false); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java index 5de424ac7d8..14a786bb195 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java @@ -19,119 +19,137 @@ import java.io.IOException; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import javax.annotation.Nonnull; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; -import org.apache.hadoop.hdfs.protocol.CachePoolInfo.Builder; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.security.UserGroupInformation; /** * A CachePool describes a set of cache resources being managed by the NameNode. * User caching requests are billed to the cache pool specified in the request. 
* - * CachePools are uniquely identified by a numeric id as well as the - * {@link CachePoolInfo} pool name. Mutable metadata is contained in - * CachePoolInfo, including pool name, owner, group, and permissions. - * See this class for more details. + * This is an internal class, only used on the NameNode. For identifying or + * describing a cache pool to clients, please use CachePoolInfo. */ +@InterfaceAudience.Private public final class CachePool { public static final Log LOG = LogFactory.getLog(CachePool.class); - private final long id; + @Nonnull + private final String poolName; - private CachePoolInfo info; + @Nonnull + private String ownerName; - public CachePool(long id) { - this.id = id; - this.info = null; - } - - CachePool(long id, String poolName, String ownerName, String groupName, + @Nonnull + private String groupName; + + @Nonnull + private FsPermission mode; + + private int weight; + + public CachePool(String poolName, String ownerName, String groupName, FsPermission mode, Integer weight) throws IOException { - this.id = id; - // Set CachePoolInfo default fields if null - if (poolName == null || poolName.isEmpty()) { - throw new IOException("invalid empty cache pool name"); - } + this.poolName = poolName; UserGroupInformation ugi = null; if (ownerName == null) { - ugi = NameNode.getRemoteUser(); - ownerName = ugi.getShortUserName(); + if (ugi == null) { + ugi = NameNode.getRemoteUser(); + } + this.ownerName = ugi.getShortUserName(); + } else { + this.ownerName = ownerName; } if (groupName == null) { if (ugi == null) { ugi = NameNode.getRemoteUser(); } - String[] groups = ugi.getGroupNames(); - if (groups.length == 0) { - throw new IOException("failed to get group names from UGI " + ugi); - } - groupName = groups[0]; + this.groupName = ugi.getPrimaryGroupName(); + } else { + this.groupName = ownerName; } - if (mode == null) { - mode = FsPermission.getDirDefault(); - } - if (weight == null) { - weight = 100; - } - CachePoolInfo.Builder builder = CachePoolInfo.newBuilder(); - builder.setPoolName(poolName).setOwnerName(ownerName) - .setGroupName(groupName).setMode(mode).setWeight(weight); - this.info = builder.build(); + this.mode = mode != null ? + new FsPermission(mode): FsPermission.getCachePoolDefault(); + this.weight = weight != null ? weight : 100; } - public CachePool(long id, CachePoolInfo info) { - this.id = id; - this.info = info; + public String getName() { + return poolName; } - /** - * @return id of the pool - */ - public long getId() { - return id; + public String getOwnerName() { + return ownerName; } + public CachePool setOwnerName(String ownerName) { + this.ownerName = ownerName; + return this; + } + + public String getGroupName() { + return groupName; + } + + public CachePool setGroupName(String groupName) { + this.groupName = groupName; + return this; + } + + public FsPermission getMode() { + return mode; + } + + public CachePool setMode(FsPermission mode) { + this.mode = new FsPermission(mode); + return this; + } + + public int getWeight() { + return weight; + } + + public CachePool setWeight(int weight) { + this.weight = weight; + return this; + } + /** * Get information about this cache pool. * + * @param fullInfo + * If true, only the name will be returned (i.e., what you + * would get if you didn't have read permission for this pool.) * @return * Cache pool information. 
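  /*
   * Sketch clarifying the behavior of getInfo as implemented below, not part
   * of the patch: getInfo(true) copies every field into the returned
   * CachePoolInfo, while getInfo(false) returns an info object carrying only
   * the pool name, which is what getInfo(FSPermissionChecker) hands to a
   * caller lacking READ permission on the pool.
   */
  static String describe(CachePool pool) {
    CachePoolInfo full = pool.getInfo(true);     // name, owner, group, mode, weight
    CachePoolInfo masked = pool.getInfo(false);  // only the pool name is set
    return full.getPoolName() + " owner=" + full.getOwnerName() +
        " maskedOwner=" + masked.getOwnerName(); // masked owner is null
  }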
*/ - public CachePoolInfo getInfo() { - return info; + public CachePoolInfo getInfo(boolean fullInfo) { + CachePoolInfo info = new CachePoolInfo(poolName); + if (!fullInfo) { + return info; + } + return info.setOwnerName(ownerName). + setGroupName(groupName). + setMode(new FsPermission(mode)). + setWeight(weight); } - void setInfo(CachePoolInfo info) { - this.info = info; + public CachePoolInfo getInfo(FSPermissionChecker pc) { + return getInfo(pc.checkPermission(this, FsAction.READ)); } public String toString() { return new StringBuilder(). - append("{ ").append("id:").append(id). - append(", info:").append(info.toString()). + append("{ ").append("poolName:").append(poolName). + append(", ownerName:").append(ownerName). + append(", groupName:").append(groupName). + append(", mode:").append(mode). + append(", weight:").append(weight). append(" }").toString(); } - - @Override - public int hashCode() { - return new HashCodeBuilder().append(id).append(info).hashCode(); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { return false; } - if (obj == this) { return true; } - if (obj.getClass() != getClass()) { - return false; - } - CachePool rhs = (CachePool)obj; - return new EqualsBuilder() - .append(id, rhs.id) - .append(info, rhs.info) - .isEquals(); - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 040a3b422ec..42bc6205c31 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -119,6 +119,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FileAlreadyExistsException; @@ -6756,9 +6757,11 @@ List> addPathCacheDirectives( if (retryCacheEntry != null && retryCacheEntry.isSuccess()) { return (List>) retryCacheEntry.getPayload(); } - final FSPermissionChecker pc = getPermissionChecker(); + final FSPermissionChecker pc = isPermissionEnabled ? 
+ getPermissionChecker() : null; boolean success = false; List> results = null; + checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -6766,7 +6769,7 @@ List> addPathCacheDirectives( throw new SafeModeException( "Cannot add path cache directive", safeMode); } - results = cacheManager.addDirectives(pc, directives); + results = cacheManager.addDirectives(directives, pc); //getEditLog().logAddPathCacheDirectives(results); FIXME: HDFS-5119 success = true; } finally { @@ -6774,7 +6777,7 @@ List> addPathCacheDirectives( if (success) { getEditLog().logSync(); } - if (auditLog.isInfoEnabled() && isExternalInvocation()) { + if (isAuditEnabled() && isExternalInvocation()) { logAuditEvent(success, "addPathCacheDirectives", null, null, null); } RetryCache.setState(retryCacheEntry, success, results); @@ -6783,147 +6786,175 @@ List> addPathCacheDirectives( } @SuppressWarnings("unchecked") - List> removePathCacheEntries(List ids) - throws IOException { - final FSPermissionChecker pc = getPermissionChecker(); + List> removePathCacheEntries(List ids) throws IOException { + CacheEntryWithPayload retryCacheEntry = + RetryCache.waitForCompletion(retryCache, null); + if (retryCacheEntry != null && retryCacheEntry.isSuccess()) { + return (List>) retryCacheEntry.getPayload(); + } + final FSPermissionChecker pc = isPermissionEnabled ? + getPermissionChecker() : null; boolean success = false; List> results = null; + checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); if (isInSafeMode()) { throw new SafeModeException( - "Cannot add path cache directive", safeMode); + "Cannot remove path cache directives", safeMode); } - results = cacheManager.removeEntries(pc, ids); + results = cacheManager.removeEntries(ids, pc); //getEditLog().logRemovePathCacheEntries(results); FIXME: HDFS-5119 success = true; } finally { writeUnlock(); - if (success) { - getEditLog().logSync(); - } - if (auditLog.isInfoEnabled() && isExternalInvocation()) { + if (isAuditEnabled() && isExternalInvocation()) { logAuditEvent(success, "removePathCacheEntries", null, null, null); } + RetryCache.setState(retryCacheEntry, success, results); + } + getEditLog().logSync(); + return results; + } + + BatchedListEntries listPathCacheEntries(long startId, + String pool) throws IOException { + final FSPermissionChecker pc = isPermissionEnabled ? 
+ getPermissionChecker() : null; + BatchedListEntries results; + checkOperation(OperationCategory.READ); + readLock(); + boolean success = false; + try { + checkOperation(OperationCategory.READ); + results = cacheManager.listPathCacheEntries(startId, pool, pc); + success = true; + } finally { + readUnlock(); + if (isAuditEnabled() && isExternalInvocation()) { + logAuditEvent(success, "listPathCacheEntries", null, null, null); + } } return results; } - List listPathCacheEntries(long startId, - Long poolId, int maxReplies) throws IOException { - LOG.info("listPathCacheEntries with " + startId + " " + poolId); - final FSPermissionChecker pc = getPermissionChecker(); - return cacheManager.listPathCacheEntries(pc, startId, poolId, maxReplies); - } - - public CachePool addCachePool(CachePoolInfo req) throws IOException { - final FSPermissionChecker pc = getPermissionChecker(); - CacheEntryWithPayload cacheEntry = - RetryCache.waitForCompletion(retryCache, null); - if (cacheEntry != null && cacheEntry.isSuccess()) { - return (CachePool)cacheEntry.getPayload(); // Return previous response - } - writeLock(); - CachePool pool = null; - try { - checkOperation(OperationCategory.WRITE); - if (!pc.isSuperUser()) { - throw new AccessControlException("Non-super users cannot " + - "add cache pools."); - } - if (isInSafeMode()) { - throw new SafeModeException( - "Cannot add cache pool " + req.getPoolName(), safeMode); - } - pool = cacheManager.addCachePool(req); - RetryCache.setState(cacheEntry, true); - //getEditLog().logAddCachePool(req); // FIXME: HDFS-5119 - } finally { - writeUnlock(); - } - - getEditLog().logSync(); - if (auditLog.isInfoEnabled() && isExternalInvocation()) { - logAuditEvent(true, "addCachePool", req.getPoolName(), null, null); - } - return pool; - } - - public void modifyCachePool(long poolId, CachePoolInfo info) - throws IOException { - final FSPermissionChecker pc = getPermissionChecker(); + public void addCachePool(CachePoolInfo req) throws IOException { + final FSPermissionChecker pc = isPermissionEnabled ? + getPermissionChecker() : null; CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); if (cacheEntry != null && cacheEntry.isSuccess()) { return; // Return previous response } + checkOperation(OperationCategory.WRITE); writeLock(); + boolean success = false; try { checkOperation(OperationCategory.WRITE); - if (!pc.isSuperUser()) { - throw new AccessControlException("Non-super users cannot " + - "modify cache pools."); - } if (isInSafeMode()) { throw new SafeModeException( - "Cannot modify cache pool " + info.getPoolName(), safeMode); + "Cannot add cache pool " + req.getPoolName(), safeMode); } - cacheManager.modifyCachePool(poolId, info); - RetryCache.setState(cacheEntry, true); + if (pc != null) { + pc.checkSuperuserPrivilege(); + } + cacheManager.addCachePool(req); + //getEditLog().logAddCachePool(req); // FIXME: HDFS-5119 + success = true; + } finally { + writeUnlock(); + if (isAuditEnabled() && isExternalInvocation()) { + logAuditEvent(success, "addCachePool", req.getPoolName(), null, null); + } + RetryCache.setState(cacheEntry, success); + } + + getEditLog().logSync(); + } + + public void modifyCachePool(CachePoolInfo req) throws IOException { + final FSPermissionChecker pc = + isPermissionEnabled ? 
getPermissionChecker() : null; + CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); + if (cacheEntry != null && cacheEntry.isSuccess()) { + return; // Return previous response + } + checkOperation(OperationCategory.WRITE); + writeLock(); + boolean success = false; + try { + checkOperation(OperationCategory.WRITE); + if (isInSafeMode()) { + throw new SafeModeException( + "Cannot modify cache pool " + req.getPoolName(), safeMode); + } + if (pc != null) { + pc.checkSuperuserPrivilege(); + } + cacheManager.modifyCachePool(req); //getEditLog().logModifyCachePool(req); // FIXME: HDFS-5119 + success = true; } finally { writeUnlock(); + if (isAuditEnabled() && isExternalInvocation()) { + logAuditEvent(success, "modifyCachePool", req.getPoolName(), null, null); + } + RetryCache.setState(cacheEntry, success); } getEditLog().logSync(); - if (auditLog.isInfoEnabled() && isExternalInvocation()) { - logAuditEvent(true, "modifyCachePool", info.getPoolName(), null, null); - } } - public void removeCachePool(long poolId) throws IOException { - final FSPermissionChecker pc = getPermissionChecker(); + public void removeCachePool(String cachePoolName) throws IOException { + final FSPermissionChecker pc = + isPermissionEnabled ? getPermissionChecker() : null; + CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); + if (cacheEntry != null && cacheEntry.isSuccess()) { + return; // Return previous response + } + checkOperation(OperationCategory.WRITE); writeLock(); - CachePool pool; + boolean success = false; try { checkOperation(OperationCategory.WRITE); - if (!pc.isSuperUser()) { - throw new AccessControlException("Non-super users cannot " + - "remove cache pools."); - } - pool = cacheManager.getCachePool(poolId); if (isInSafeMode()) { - String identifier; - if (pool == null) { - identifier = "with id " + Long.toString(poolId); - } else { - identifier = pool.getInfo().getPoolName(); - } throw new SafeModeException( - "Cannot remove cache pool " + identifier, safeMode); + "Cannot remove cache pool " + cachePoolName, safeMode); } - cacheManager.removeCachePool(poolId); + if (pc != null) { + pc.checkSuperuserPrivilege(); + } + cacheManager.removeCachePool(cachePoolName); //getEditLog().logRemoveCachePool(req); // FIXME: HDFS-5119 + success = true; } finally { writeUnlock(); + if (isAuditEnabled() && isExternalInvocation()) { + logAuditEvent(success, "removeCachePool", cachePoolName, null, null); + } + RetryCache.setState(cacheEntry, success); } - + getEditLog().logSync(); - if (auditLog.isInfoEnabled() && isExternalInvocation()) { - logAuditEvent(true, "removeCachePool", pool.getInfo().getPoolName(), - null, null); - } } - public List listCachePools(long prevKey, - int maxRepliesPerRequest) throws IOException { - List results; + public BatchedListEntries listCachePools(String prevKey) + throws IOException { + final FSPermissionChecker pc = + isPermissionEnabled ? 
getPermissionChecker() : null; + BatchedListEntries results; + checkOperation(OperationCategory.READ); + boolean success = false; readLock(); try { checkOperation(OperationCategory.READ); - results = cacheManager.listCachePools(prevKey, maxRepliesPerRequest); + results = cacheManager.listCachePools(pc, prevKey); + success = true; } finally { readUnlock(); + if (isAuditEnabled() && isExternalInvocation()) { + logAuditEvent(success, "listCachePools", null, null, null); + } } return results; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index 54f7463014e..644a6397f93 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -28,7 +29,6 @@ import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; @@ -264,16 +264,15 @@ private void checkStickyBit(INode parent, INode inode, Snapshot snapshot * @return if the pool can be accessed */ public boolean checkPermission(CachePool pool, FsAction access) { - CachePoolInfo info = pool.getInfo(); - FsPermission mode = info.getMode(); + FsPermission mode = pool.getMode(); if (isSuperUser()) { return true; } - if (user.equals(info.getOwnerName()) + if (user.equals(pool.getOwnerName()) && mode.getUserAction().implies(access)) { return true; } - if (groups.contains(info.getGroupName()) + if (groups.contains(pool.getGroupName()) && mode.getGroupAction().implies(access)) { return true; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index f5f85d124fe..bcaefd4df07 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -31,11 +31,13 @@ import java.util.Collection; import java.util.HashMap; import java.util.List; +import java.util.NoSuchElementException; import org.apache.commons.logging.Log; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BatchedRemoteIterator; +import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.CreateFlag; @@ -60,9 +62,9 @@ import org.apache.hadoop.hdfs.HDFSPolicyProvider; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; -import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import 
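The rewritten checkPermission above reads the pool's mode bits directly: superusers always pass, then the owner bits are tried for the pool's owner and the group bits for members of the pool's group; the rest of the method, which is not visible in this hunk, presumably falls back to the pool's "other" bits. A standalone sketch of that evaluation order, using a plain octal mode instead of FsPermission and FsAction:

    import java.util.Set;

    final class PoolAccessSketch {
      enum Action { READ, WRITE }

      // mode is a classic octal triple such as 0755
      static boolean canAccess(int mode, String poolOwner, String poolGroup,
          String user, Set<String> userGroups, boolean superUser, Action action) {
        int bit = (action == Action.READ) ? 4 : 2;   // r = 4, w = 2
        if (superUser) {
          return true;
        }
        if (user.equals(poolOwner) && ((mode >> 6) & bit) != 0) {
          return true;                               // owner bits grant access
        }
        if (userGroups.contains(poolGroup) && ((mode >> 3) & bit) != 0) {
          return true;                               // group bits grant access
        }
        return (mode & bit) != 0;                    // fall back to the "other" bits
      }
    }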
org.apache.hadoop.hdfs.protocol.PathCacheDirective; import org.apache.hadoop.hdfs.protocol.PathCacheEntry; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -1223,20 +1225,17 @@ public List> removePathCacheEntries(List ids) private class ServerSidePathCacheEntriesIterator extends BatchedRemoteIterator { - private final Long poolId; + private final String pool; - public ServerSidePathCacheEntriesIterator(Long firstKey, - int maxRepliesPerRequest, Long poolId) { - super(firstKey, maxRepliesPerRequest); - this.poolId = poolId; + public ServerSidePathCacheEntriesIterator(Long firstKey, String pool) { + super(firstKey); + this.pool = pool; } @Override public BatchedEntries makeRequest( - Long prevKey, int maxRepliesPerRequest) throws IOException { - return new BatchedListEntries( - namesystem.listPathCacheEntries(prevKey, poolId, - maxRepliesPerRequest)); + Long nextKey) throws IOException { + return namesystem.listPathCacheEntries(nextKey, pool); } @Override @@ -1244,52 +1243,50 @@ public Long elementToPrevKey(PathCacheEntry entry) { return entry.getEntryId(); } } - + @Override public RemoteIterator listPathCacheEntries(long prevId, - long poolId, int maxReplies) throws IOException { - return new ServerSidePathCacheEntriesIterator(prevId, maxReplies, poolId); + String pool) throws IOException { + return new ServerSidePathCacheEntriesIterator(prevId, pool); } @Override - public CachePool addCachePool(CachePoolInfo info) throws IOException { - return namesystem.addCachePool(info); + public void addCachePool(CachePoolInfo info) throws IOException { + namesystem.addCachePool(info); } @Override - public void modifyCachePool(long poolId, CachePoolInfo info) - throws IOException { - namesystem.modifyCachePool(poolId, info); + public void modifyCachePool(CachePoolInfo info) throws IOException { + namesystem.modifyCachePool(info); } @Override - public void removeCachePool(long poolId) throws IOException { - namesystem.removeCachePool(poolId); + public void removeCachePool(String cachePoolName) throws IOException { + namesystem.removeCachePool(cachePoolName); } private class ServerSideCachePoolIterator - extends BatchedRemoteIterator { + extends BatchedRemoteIterator { - public ServerSideCachePoolIterator(long prevId, int maxRepliesPerRequest) { - super(prevId, maxRepliesPerRequest); + public ServerSideCachePoolIterator(String prevKey) { + super(prevKey); } @Override - public BatchedEntries makeRequest(Long prevId, - int maxRepliesPerRequest) throws IOException { - return new BatchedListEntries( - namesystem.listCachePools(prevId, maxRepliesPerRequest)); + public BatchedEntries makeRequest(String prevKey) + throws IOException { + return namesystem.listCachePools(prevKey); } @Override - public Long elementToPrevKey(CachePool element) { - return element.getId(); + public String elementToPrevKey(CachePoolInfo element) { + return element.getPoolName(); } } @Override - public RemoteIterator listCachePools(long prevPoolId, - int maxRepliesPerRequest) throws IOException { - return new ServerSideCachePoolIterator(prevPoolId, maxRepliesPerRequest); + public RemoteIterator listCachePools(String prevKey) + throws IOException { + return new ServerSideCachePoolIterator(prevKey); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto 
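The two server-side iterators above turn paged RPCs into a plain RemoteIterator: each makeRequest call fetches the batch that follows the previous key, and elementToPrevKey derives the resume point from the last element handed out. A simplified standalone analogue of that BatchedRemoteIterator pattern is sketched below; the real class also honours the hasMore flag carried by BatchedEntries, which this sketch approximates by stopping at the first empty batch.

    import java.util.Collections;
    import java.util.Iterator;
    import java.util.List;
    import java.util.NoSuchElementException;

    abstract class BatchedIteratorSketch<K, E> implements Iterator<E> {
      private K prevKey;
      private Iterator<E> batch = Collections.<E>emptyIterator();
      private boolean exhausted = false;

      BatchedIteratorSketch(K firstKey) {
        this.prevKey = firstKey;
      }

      /** Fetch the batch of elements that follows prevKey. */
      abstract List<E> makeRequest(K prevKey);

      /** Derive the resume key from the last element returned. */
      abstract K elementToPrevKey(E element);

      @Override
      public boolean hasNext() {
        if (!batch.hasNext() && !exhausted) {
          List<E> next = makeRequest(prevKey);
          exhausted = next.isEmpty();
          batch = next.iterator();
        }
        return batch.hasNext();
      }

      @Override
      public E next() {
        if (!hasNext()) {
          throw new NoSuchElementException();
        }
        E element = batch.next();
        prevKey = elementToPrevKey(element);   // where the next request resumes
        return element;
      }

      @Override
      public void remove() {
        throw new UnsupportedOperationException();
      }
    }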
index f196a7074ce..c097c3b696a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -363,27 +363,9 @@ message IsFileClosedResponseProto { required bool result = 1; } -message CachePoolInfoProto { - optional string poolName = 1; - optional string ownerName = 2; - optional string groupName = 3; - optional int32 mode = 4; - optional int32 weight = 5; -} - -message CachePoolProto { - optional int64 id = 1; - optional CachePoolInfoProto info = 2; -} - message PathCacheDirectiveProto { required string path = 1; - required CachePoolProto pool = 2; -} - -message PathCacheEntryProto { - required int64 id = 1; - optional PathCacheDirectiveProto directive = 2; + required string pool = 2; } message AddPathCacheDirectivesRequestProto { @@ -417,42 +399,52 @@ enum RemovePathCacheEntryErrorProto { } message ListPathCacheEntriesRequestProto { - required PathCacheEntryProto prevEntry = 1; - required CachePoolProto pool = 2; - optional int32 maxReplies = 3; + required int64 prevId = 1; + required string pool = 2; +} + +message ListPathCacheEntriesElementProto { + required int64 id = 1; + required string path = 2; + required string pool = 3; } message ListPathCacheEntriesResponseProto { - repeated PathCacheEntryProto entries = 1; + repeated ListPathCacheEntriesElementProto elements = 1; required bool hasMore = 2; } message AddCachePoolRequestProto { - required CachePoolInfoProto info = 1; + required string poolName = 1; + optional string ownerName = 2; + optional string groupName = 3; + optional int32 mode = 4; + optional int32 weight = 5; } -message AddCachePoolResponseProto { - required CachePoolProto pool = 1; +message AddCachePoolResponseProto { // void response } message ModifyCachePoolRequestProto { - required CachePoolProto pool = 1; - required CachePoolInfoProto info = 2; + required string poolName = 1; + optional string ownerName = 2; + optional string groupName = 3; + optional int32 mode = 4; + optional int32 weight = 5; } message ModifyCachePoolResponseProto { // void response } message RemoveCachePoolRequestProto { - required CachePoolProto pool = 1; + required string poolName = 1; } message RemoveCachePoolResponseProto { // void response } message ListCachePoolsRequestProto { - required CachePoolProto prevPool = 1; - required int32 maxReplies = 2; + required string prevPoolName = 1; } message ListCachePoolsResponseProto { @@ -461,7 +453,11 @@ message ListCachePoolsResponseProto { } message ListCachePoolsResponseElementProto { - required CachePoolProto pool = 1; + required string poolName = 1; + required string ownerName = 2; + required string groupName = 3; + required int32 mode = 4; + required int32 weight = 5; } message GetFileLinkInfoRequestProto { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathCacheRequests.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathCacheRequests.java index 17dea9aee93..ddf8e169708 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathCacheRequests.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathCacheRequests.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.assertEquals; +import static org.junit.Assert.*; import java.io.IOException; import java.security.PrivilegedExceptionAction; 
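With CachePoolProto and PathCacheEntryProto gone, the wire protocol above identifies pools purely by name and carries the flattened pool attributes inline. Assuming the standard protoc Java output for this file (generated classes nested in the usual ClientNamenodeProtocolProtos wrapper, with builder setters named after the fields), an add-pool request would be assembled roughly as follows; the snippet is illustrative only and depends on that generated code being on the classpath.

    AddCachePoolRequestProto req = AddCachePoolRequestProto.newBuilder()
        .setPoolName("pool1")      // required
        .setOwnerName("bob")       // the remaining fields are optional
        .setGroupName("bobgroup")
        .setMode(0755)             // octal literal, matching the int32 mode field
        .setWeight(150)
        .build();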
@@ -31,64 +31,58 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError; +import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.PoolWritePermissionDeniedError; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; +import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; import org.apache.hadoop.hdfs.protocol.PathCacheDirective; import org.apache.hadoop.hdfs.protocol.PathCacheEntry; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Fallible; -import org.junit.After; -import org.junit.Before; import org.junit.Test; public class TestPathCacheRequests { static final Log LOG = LogFactory.getLog(TestPathCacheRequests.class); - private static Configuration conf = new HdfsConfiguration(); - private static MiniDFSCluster cluster = null; - private static NamenodeProtocols proto = null; - - @Before - public void setUp() throws Exception { - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); - cluster.waitActive(); - proto = cluster.getNameNodeRpc(); - } - - @After - public void tearDown() throws Exception { - if (cluster != null) { - cluster.shutdown(); - } - } + private static final UserGroupInformation unprivilegedUser = + UserGroupInformation.createRemoteUser("unprivilegedUser"); @Test public void testCreateAndRemovePools() throws Exception { - CachePoolInfo req = - CachePoolInfo.newBuilder().setPoolName("pool1").setOwnerName("bob") - .setGroupName("bobgroup").setMode(new FsPermission((short) 0755)) - .setWeight(150).build(); - CachePool pool = proto.addCachePool(req); + Configuration conf = new HdfsConfiguration(); + MiniDFSCluster cluster = null; + + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + NamenodeProtocols proto = cluster.getNameNodeRpc(); + CachePoolInfo req = new CachePoolInfo("pool1"). + setOwnerName("bob").setGroupName("bobgroup"). 
+ setMode(new FsPermission((short)0755)).setWeight(150); + proto.addCachePool(req); try { - proto.removeCachePool(909); + proto.removeCachePool("pool99"); Assert.fail("expected to get an exception when " + "removing a non-existent pool."); } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("can't remove " + + "nonexistent cache pool", ioe); } - proto.removeCachePool(pool.getId()); + proto.removeCachePool("pool1"); try { - proto.removeCachePool(pool.getId()); + proto.removeCachePool("pool1"); Assert.fail("expected to get an exception when " + "removing a non-existent pool."); } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("can't remove " + + "nonexistent cache pool", ioe); } req = new CachePoolInfo("pool2"); proto.addCachePool(req); @@ -96,42 +90,36 @@ public void testCreateAndRemovePools() throws Exception { @Test public void testCreateAndModifyPools() throws Exception { - // Create a new pool - CachePoolInfo info = CachePoolInfo.newBuilder(). - setPoolName("pool1"). - setOwnerName("abc"). - setGroupName("123"). - setMode(new FsPermission((short)0755)). - setWeight(150). - build(); - CachePool pool = proto.addCachePool(info); - CachePoolInfo actualInfo = pool.getInfo(); - assertEquals("Expected info to match create time settings", - info, actualInfo); - // Modify the pool - info = CachePoolInfo.newBuilder(). - setPoolName("pool2"). - setOwnerName("def"). - setGroupName("456"). - setMode(new FsPermission((short)0644)). - setWeight(200). - build(); - proto.modifyCachePool(pool.getId(), info); - // Check via listing this time - RemoteIterator iter = proto.listCachePools(0, 1); - CachePool listedPool = iter.next(); - actualInfo = listedPool.getInfo(); - assertEquals("Expected info to match modified settings", info, actualInfo); + Configuration conf = new HdfsConfiguration(); + MiniDFSCluster cluster = null; + // set low limits here for testing purposes + conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, 2); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, 2); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + NamenodeProtocols proto = cluster.getNameNodeRpc(); + proto.addCachePool(new CachePoolInfo("pool1"). + setOwnerName("abc").setGroupName("123"). + setMode(new FsPermission((short)0755)).setWeight(150)); + proto.modifyCachePool(new CachePoolInfo("pool1"). 
+ setOwnerName("def").setGroupName("456")); + RemoteIterator iter = proto.listCachePools(""); + CachePoolInfo info = iter.next(); + assertEquals("pool1", info.getPoolName()); + assertEquals("def", info.getOwnerName()); + assertEquals("456", info.getGroupName()); + assertEquals(new FsPermission((short)0755), info.getMode()); + assertEquals(Integer.valueOf(150), info.getWeight()); try { - proto.removeCachePool(808); + proto.removeCachePool("pool99"); Assert.fail("expected to get an exception when " + "removing a non-existent pool."); } catch (IOException ioe) { } - proto.removeCachePool(pool.getId()); + proto.removeCachePool("pool1"); try { - proto.removeCachePool(pool.getId()); + proto.removeCachePool("pool1"); Assert.fail("expected to get an exception when " + "removing a non-existent pool."); } catch (IOException ioe) { @@ -142,13 +130,13 @@ private static void validateListAll( RemoteIterator iter, long id0, long id1, long id2) throws Exception { Assert.assertEquals(new PathCacheEntry(id0, - new PathCacheDirective("/alpha", 1)), + new PathCacheDirective("/alpha", "pool1")), iter.next()); Assert.assertEquals(new PathCacheEntry(id1, - new PathCacheDirective("/beta", 2)), + new PathCacheDirective("/beta", "pool2")), iter.next()); Assert.assertEquals(new PathCacheEntry(id2, - new PathCacheDirective("/gamma", 1)), + new PathCacheDirective("/gamma", "pool1")), iter.next()); Assert.assertFalse(iter.hasNext()); } @@ -161,36 +149,34 @@ public void testSetAndGet() throws Exception { try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); - final CachePool pool1 = proto.addCachePool(new CachePoolInfo("pool1")); - final CachePool pool2 = proto.addCachePool(new CachePoolInfo("pool2")); - final CachePool pool3 = proto.addCachePool(new CachePoolInfo("pool3")); - final CachePool pool4 = proto.addCachePool(CachePoolInfo.newBuilder() - .setPoolName("pool4") - .setMode(new FsPermission((short)0)).build()); - UserGroupInformation testUgi = UserGroupInformation - .createUserForTesting("myuser", new String[]{"mygroup"}); - List> addResults1 = testUgi.doAs( - new PrivilegedExceptionAction>>() { - @Override - public List> run() throws IOException { - List> entries; - entries = proto.addPathCacheDirectives( - Arrays.asList(new PathCacheDirective[] { - new PathCacheDirective("/alpha", pool1.getId()), - new PathCacheDirective("/beta", pool2.getId()), - new PathCacheDirective("", pool3.getId()), - new PathCacheDirective("/zeta", 404), - new PathCacheDirective("/zeta", pool4.getId()) - })); - return entries; + final NamenodeProtocols proto = cluster.getNameNodeRpc(); + proto.addCachePool(new CachePoolInfo("pool1"). + setMode(new FsPermission((short)0777))); + proto.addCachePool(new CachePoolInfo("pool2"). + setMode(new FsPermission((short)0777))); + proto.addCachePool(new CachePoolInfo("pool3"). + setMode(new FsPermission((short)0777))); + proto.addCachePool(new CachePoolInfo("pool4"). 
+ setMode(new FsPermission((short)0))); + + List> addResults1 = + unprivilegedUser.doAs(new PrivilegedExceptionAction< + List>>() { + @Override + public List> run() throws IOException { + return proto.addPathCacheDirectives(Arrays.asList( + new PathCacheDirective[] { + new PathCacheDirective("/alpha", "pool1"), + new PathCacheDirective("/beta", "pool2"), + new PathCacheDirective("", "pool3"), + new PathCacheDirective("/zeta", "nonexistent_pool"), + new PathCacheDirective("/zeta", "pool4") + })); } - }); - // Save the successful additions + }); long ids1[] = new long[2]; - for (int i=0; i<2; i++) { - ids1[i] = addResults1.get(i).get().getEntryId(); - } - // Verify that the unsuccessful additions failed properly + ids1[0] = addResults1.get(0).get().getEntryId(); + ids1[1] = addResults1.get(1).get().getEntryId(); try { addResults1.get(2).get(); Assert.fail("expected an error when adding an empty path"); @@ -201,7 +187,7 @@ public List> run() throws IOException { addResults1.get(3).get(); Assert.fail("expected an error when adding to a nonexistent pool."); } catch (IOException ioe) { - Assert.assertTrue(ioe.getCause() instanceof InvalidPoolError); + Assert.assertTrue(ioe.getCause() instanceof InvalidPoolNameError); } try { addResults1.get(4).get(); @@ -215,10 +201,10 @@ public List> run() throws IOException { List> addResults2 = proto.addPathCacheDirectives(Arrays.asList( new PathCacheDirective[] { - new PathCacheDirective("/alpha", pool1.getId()), - new PathCacheDirective("/theta", 404), - new PathCacheDirective("bogus", pool1.getId()), - new PathCacheDirective("/gamma", pool1.getId()) + new PathCacheDirective("/alpha", "pool1"), + new PathCacheDirective("/theta", ""), + new PathCacheDirective("bogus", "pool1"), + new PathCacheDirective("/gamma", "pool1") })); long id = addResults2.get(0).get().getEntryId(); Assert.assertEquals("expected to get back the same ID as last time " + @@ -228,7 +214,7 @@ public List> run() throws IOException { Assert.fail("expected an error when adding a path cache " + "directive with an empty pool name."); } catch (IOException ioe) { - Assert.assertTrue(ioe.getCause() instanceof InvalidPoolError); + Assert.assertTrue(ioe.getCause() instanceof InvalidPoolNameError); } try { addResults2.get(2).get(); @@ -240,16 +226,14 @@ public List> run() throws IOException { long ids2[] = new long[1]; ids2[0] = addResults2.get(3).get().getEntryId(); - // Validate listing all entries RemoteIterator iter = - proto.listPathCacheEntries(-1l, -1l, 100); + proto.listPathCacheEntries(0, ""); validateListAll(iter, ids1[0], ids1[1], ids2[0]); - iter = proto.listPathCacheEntries(-1l, -1l, 1); + iter = proto.listPathCacheEntries(0, ""); validateListAll(iter, ids1[0], ids1[1], ids2[0]); - // Validate listing certain pools - iter = proto.listPathCacheEntries(0, pool3.getId(), 1); + iter = proto.listPathCacheEntries(0, "pool3"); Assert.assertFalse(iter.hasNext()); - iter = proto.listPathCacheEntries(0, pool2.getId(), 4444); + iter = proto.listPathCacheEntries(0, "pool2"); Assert.assertEquals(addResults1.get(1).get(), iter.next()); Assert.assertFalse(iter.hasNext()); @@ -271,7 +255,7 @@ public List> run() throws IOException { } catch (IOException ioe) { Assert.assertTrue(ioe.getCause() instanceof NoSuchIdException); } - iter = proto.listPathCacheEntries(0, pool2.getId(), 4444); + iter = proto.listPathCacheEntries(0, "pool2"); Assert.assertFalse(iter.hasNext()); } finally { if (cluster != null) { cluster.shutdown(); } From 7a74ca3694f60825e5142dfa2c3d524cbd84725e Mon Sep 17 00:00:00 2001 From: 
Colin McCabe Date: Fri, 6 Sep 2013 20:07:38 +0000 Subject: [PATCH 13/51] HDFS-5169. hdfs.c: translateZCRException: null pointer deref when translating some exceptions (Contributed by Colin Patrick McCabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1520679 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 ++- hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index 97777dfb280..0a903312a96 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -27,7 +27,8 @@ HDFS-4949 (Unreleased) HDFS-5163. Miscellaneous cache pool RPC fixes. (Contributed by Colin Patrick McCabe) - OPTIMIZATIONS BUG FIXES + HDFS-5169. hdfs.c: translateZCRException: null pointer deref when + translating some exceptions. (Contributed by Colin Patrick McCabe) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c index cfffe385a8d..66799c82c43 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c @@ -2270,7 +2270,7 @@ static int translateZCRException(JNIEnv *env, jthrowable exc) ret = EPROTONOSUPPORT; goto done; } - ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + ret = printExceptionAndFree(env, exc, PRINT_EXC_ALL, "hadoopZeroCopyRead: ZeroCopyCursor#read failed"); done: free(className); From 3a9cd79e9ddd5a9499e28633ccccdc9eef22b813 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Mon, 9 Sep 2013 18:53:01 +0000 Subject: [PATCH 14/51] HDFS-5120. Add command-line support for manipulating cache pools. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1521240 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/hadoop/util/StringUtils.java | 77 +++++ .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 4 + .../hadoop/hdfs/DistributedFileSystem.java | 50 ++- .../hadoop/hdfs/protocol/ClientProtocol.java | 2 +- .../hdfs/server/namenode/CacheManager.java | 4 +- .../hdfs/server/namenode/CachePool.java | 11 + .../apache/hadoop/hdfs/tools/DFSAdmin.java | 314 ++++++++++++++++++ .../src/test/resources/testHDFSConf.xml | 67 +++- 8 files changed, 523 insertions(+), 6 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java index 284a042e83b..32e5572c2e9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java @@ -905,4 +905,81 @@ public static String getStackTrace(Thread t) { } return str.toString(); } + + /** + * From a list of command-line arguments, remove both an option and the + * next argument. + * + * @param name Name of the option to remove. Example: -foo. + * @param args List of arguments. + * @return null if the option was not found; the value of the + * option otherwise. 
+ */ + public static String popOptionWithArgument(String name, List args) { + String val = null; + for (Iterator iter = args.iterator(); iter.hasNext(); ) { + String cur = iter.next(); + if (cur.equals("--")) { + // stop parsing arguments when you see -- + break; + } else if (cur.equals(name)) { + iter.remove(); + if (!iter.hasNext()) { + throw new RuntimeException("option " + name + " requires 1 " + + "argument."); + } + val = iter.next(); + iter.remove(); + break; + } + } + return val; + } + + /** + * From a list of command-line arguments, remove an option. + * + * @param name Name of the option to remove. Example: -foo. + * @param args List of arguments. + * @return true if the option was found and removed; false otherwise. + */ + public static boolean popOption(String name, List args) { + for (Iterator iter = args.iterator(); iter.hasNext(); ) { + String cur = iter.next(); + if (cur.equals("--")) { + // stop parsing arguments when you see -- + break; + } else if (cur.equals(name)) { + iter.remove(); + return true; + } + } + return false; + } + + /** + * From a list of command-line arguments, return the first non-option + * argument. Non-option arguments are those which either come after + * a double dash (--) or do not start with a dash. + * + * @param args List of arguments. + * @return The first non-option argument, or null if there were none. + */ + public static String popFirstNonOption(List args) { + for (Iterator iter = args.iterator(); iter.hasNext(); ) { + String cur = iter.next(); + if (cur.equals("--")) { + if (!iter.hasNext()) { + return null; + } + cur = iter.next(); + iter.remove(); + return cur; + } else if (!cur.startsWith("-")) { + iter.remove(); + return cur; + } + } + return null; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index 0a903312a96..69d33a6927f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -27,6 +27,10 @@ HDFS-4949 (Unreleased) HDFS-5163. Miscellaneous cache pool RPC fixes. (Contributed by Colin Patrick McCabe) + HDFS-5120. Add command-line support for manipulating cache pools. + (Contributed by Colin Patrick McCabe) + + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 6cb84741ab1..c779f889a2b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.client.HdfsAdmin; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; @@ -1578,5 +1579,52 @@ public Boolean next(final FileSystem fs, final Path p) } }.resolve(this, absF); } - + + /** + * Add a cache pool. + * + * @param req + * The request to add a cache pool. + * @throws IOException + * If the request could not be completed. + */ + public void addCachePool(CachePoolInfo info) throws IOException { + dfs.namenode.addCachePool(info); + } + + /** + * Modify an existing cache pool. 
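The three helpers above give DFSAdmin a small, destructive argument parser: each call removes whatever it consumed from the list, "--" stops option parsing, and anything left afterwards is unrecognized. A short usage example, assuming the patched org.apache.hadoop.util.StringUtils is on the classpath; the option values here are made up for illustration.

    import java.util.Arrays;
    import java.util.LinkedList;
    import java.util.List;

    import org.apache.hadoop.util.StringUtils;

    public class PopOptionExample {
      public static void main(String[] argv) {
        // Simulated tail of: -addCachePool -owner bob -mode 0755 pool1
        List<String> args = new LinkedList<String>(
            Arrays.asList("-owner", "bob", "-mode", "0755", "pool1"));
        String owner  = StringUtils.popOptionWithArgument("-owner", args);  // "bob"
        String mode   = StringUtils.popOptionWithArgument("-mode", args);   // "0755"
        boolean force = StringUtils.popOption("-force", args);              // false
        String name   = StringUtils.popFirstNonOption(args);                // "pool1"
        System.out.println(owner + " " + mode + " " + force + " " + name);
        // Anything still left in args is unrecognized, which is exactly how the
        // DFSAdmin cache pool commands decide to print their usage strings.
      }
    }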
+ * + * @param req + * The request to modify a cache pool. + * @throws IOException + * If the request could not be completed. + */ + public void modifyCachePool(CachePoolInfo info) throws IOException { + dfs.namenode.modifyCachePool(info); + } + + /** + * Remove a cache pool. + * + * @param cachePoolName + * Name of the cache pool to remove. + * @throws IOException + * if the cache pool did not exist, or could not be removed. + */ + public void removeCachePool(String name) throws IOException { + dfs.namenode.removeCachePool(name); + } + + /** + * List all cache pools. + * + * @return A remote iterator from which you can get CachePoolInfo objects. + * Requests will be made as needed. + * @throws IOException + * If there was an error listing cache pools. + */ + public RemoteIterator listCachePools() throws IOException { + return dfs.namenode.listCachePools(""); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 45b041ffb4a..cc31c397c1d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -1144,7 +1144,7 @@ public RemoteIterator listPathCacheEntries(long prevId, public void addCachePool(CachePoolInfo info) throws IOException; /** - * Modify a cache pool. + * Modify an existing cache pool. * * @param req * The request to modify a cache pool. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index 53499c86a69..b71e4d0d369 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -254,9 +254,7 @@ public synchronized List> removeEntries(List entryIds, public synchronized void addCachePool(CachePoolInfo info) throws IOException { String poolName = info.getPoolName(); - if (poolName.isEmpty()) { - throw new IOException("invalid empty cache pool name"); - } + CachePool.validateName(poolName); CachePool pool = cachePools.get(poolName); if (pool != null) { throw new IOException("cache pool " + poolName + " already exists."); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java index 14a786bb195..36ebd402e41 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java @@ -40,6 +40,8 @@ public final class CachePool { public static final Log LOG = LogFactory.getLog(CachePool.class); + public static final int DEFAULT_WEIGHT = 100; + @Nonnull private final String poolName; @@ -152,4 +154,13 @@ public String toString() { append(", weight:").append(weight). append(" }").toString(); } + + public static void validateName(String name) throws IOException { + if (name.isEmpty()) { + // Empty pool names are not allowed because they would be highly + // confusing. 
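Taken together, the DistributedFileSystem methods above give applications programmatic control over cache pools without going through dfsadmin. A rough end-to-end sketch, assuming fs.defaultFS points at a cluster whose NameNode carries these patches; the pool name and attributes are arbitrary examples.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

    public class CachePoolRoundTrip {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        dfs.addCachePool(new CachePoolInfo("pool1")
            .setOwnerName("bob")
            .setGroupName("bobgroup")
            .setMode(new FsPermission((short) 0755))
            .setWeight(150));
        RemoteIterator<CachePoolInfo> it = dfs.listCachePools();
        while (it.hasNext()) {
          System.out.println("pool: " + it.next().getPoolName());
        }
        dfs.removeCachePool("pool1");
      }
    }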
They would also break the ability to list all pools + // by starting with prevKey = "" + throw new IOException("invalid empty cache pool name"); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index 98691df6a57..912569a9c1c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -24,6 +24,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.LinkedList; import java.util.List; import java.util.TreeSet; @@ -36,6 +37,8 @@ import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.FsStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.shell.Command; import org.apache.hadoop.fs.shell.CommandFormat; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -44,12 +47,14 @@ import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.NameNodeProxies; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.SnapshotException; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.server.namenode.CachePool; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.TransferFsImage; import org.apache.hadoop.ipc.RPC; @@ -62,6 +67,8 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; +import com.google.common.base.Joiner; + /** * This class provides some DFS administrative access shell commands. 
*/ @@ -455,6 +462,234 @@ public int saveNamespace() throws IOException { return exitCode; } + final private static String ADD_CACHE_POOL_USAGE = + "-addCachePool [-owner ] " + + "[-group ] [-mode ] [-weight ]"; + + public int addCachePool(String argsArray[], int idx) throws IOException { + List args= new LinkedList(); + for (int i = idx; i < argsArray.length; i++) { + args.add(argsArray[i]); + } + String owner = StringUtils.popOptionWithArgument("-owner", args); + if (owner == null) { + owner = UserGroupInformation.getCurrentUser().getShortUserName(); + } + String group = StringUtils.popOptionWithArgument("-group", args); + if (group == null) { + group = UserGroupInformation.getCurrentUser().getGroupNames()[0]; + } + String modeString = StringUtils.popOptionWithArgument("-mode", args); + int mode; + if (modeString == null) { + mode = FsPermission.getCachePoolDefault().toShort(); + } else { + mode = Integer.parseInt(modeString, 8); + } + String weightString = StringUtils.popOptionWithArgument("-weight", args); + int weight; + if (weightString == null) { + weight = CachePool.DEFAULT_WEIGHT; + } else { + weight = Integer.parseInt(weightString); + } + String name = StringUtils.popFirstNonOption(args); + if (name == null) { + System.err.println("You must specify a name when creating a " + + "cache pool."); + return 1; + } + if (!args.isEmpty()) { + System.err.print("Can't understand arguments: " + + Joiner.on(" ").join(args) + "\n"); + System.err.println("Usage is " + ADD_CACHE_POOL_USAGE); + return 1; + } + DistributedFileSystem dfs = getDFS(); + CachePoolInfo info = new CachePoolInfo(name). + setOwnerName(owner). + setGroupName(group). + setMode(new FsPermission((short)mode)). + setWeight(weight); + try { + dfs.addCachePool(info); + } catch (IOException e) { + throw new RemoteException(e.getClass().getName(), e.getMessage()); + } + System.out.println("Successfully added cache pool " + name + "."); + return 0; + } + + final private static String MODIFY_CACHE_POOL_USAGE = + "-modifyCachePool [-owner ] " + + "[-group ] [-mode ] [-weight ]"; + + public int modifyCachePool(String argsArray[], int idx) throws IOException { + List args = new LinkedList(); + for (int i = idx; i < argsArray.length; i++) { + args.add(argsArray[i]); + } + String owner = StringUtils.popOptionWithArgument("-owner", args); + String group = StringUtils.popOptionWithArgument("-group", args); + String modeString = StringUtils.popOptionWithArgument("-mode", args); + Integer mode = (modeString == null) ? + null : Integer.parseInt(modeString, 8); + String weightString = StringUtils.popOptionWithArgument("-weight", args); + Integer weight = (weightString == null) ? 
+ null : Integer.parseInt(weightString); + String name = StringUtils.popFirstNonOption(args); + if (name == null) { + System.err.println("You must specify a name when creating a " + + "cache pool."); + return 1; + } + if (!args.isEmpty()) { + System.err.print("Can't understand arguments: " + + Joiner.on(" ").join(args) + "\n"); + System.err.println("usage is " + MODIFY_CACHE_POOL_USAGE); + return 1; + } + boolean changed = false; + CachePoolInfo info = new CachePoolInfo(name); + if (owner != null) { + info.setOwnerName(owner); + changed = true; + } + if (group != null) { + info.setGroupName(group); + changed = true; + } + if (mode != null) { + info.setMode(new FsPermission(mode.shortValue())); + changed = true; + } + if (weight != null) { + info.setWeight(weight); + changed = true; + } + if (!changed) { + System.err.println("You must specify at least one attribute to " + + "change in the cache pool."); + return 1; + } + DistributedFileSystem dfs = getDFS(); + try { + dfs.modifyCachePool(info); + } catch (IOException e) { + throw new RemoteException(e.getClass().getName(), e.getMessage()); + } + System.out.print("Successfully modified cache pool " + name); + String prefix = " to have "; + if (owner != null) { + System.out.print(prefix + "owner name " + owner); + prefix = "and "; + } + if (group != null) { + System.out.print(prefix + "group name " + group); + prefix = "and "; + } + if (mode != null) { + System.out.print(prefix + "mode " + new FsPermission(mode.shortValue())); + prefix = "and "; + } + if (weight != null) { + System.out.print(prefix + "weight " + weight); + prefix = "and "; + } + System.out.print("\n"); + return 0; + } + + final private static String REMOVE_CACHE_POOL_USAGE = + "-removeCachePool "; + + public int removeCachePool(String argsArray[], int idx) throws IOException { + List args = new LinkedList(); + for (int i = idx; i < argsArray.length; i++) { + args.add(argsArray[i]); + } + String name = StringUtils.popFirstNonOption(args); + if (name == null) { + System.err.println("You must specify a name when deleting a " + + "cache pool."); + return 1; + } + if (!args.isEmpty()) { + System.err.print("Can't understand arguments: " + + Joiner.on(" ").join(args) + "\n"); + System.err.println("Usage is " + REMOVE_CACHE_POOL_USAGE); + return 1; + } + DistributedFileSystem dfs = getDFS(); + try { + dfs.removeCachePool(name); + } catch (IOException e) { + dfs.removeCachePool(name); + throw new RemoteException(e.getClass().getName(), e.getMessage()); + } + System.out.println("Successfully removed cache pool " + name + "."); + return 0; + } + + final private static String LIST_CACHE_POOLS_USAGE = + "-listCachePools] [-verbose] [name]"; + + private void listCachePool(CachePoolInfo info) { + System.out.print(String.format("%s\n", info.getPoolName())); + System.out.print(String.format("owner:\t%s\n", info.getOwnerName())); + System.out.print(String.format("group:\t%s\n", info.getGroupName())); + System.out.print(String.format("mode:\t%s\n", info.getMode())); + System.out.print(String.format("weight:\t%d\n", info.getWeight())); + System.out.print("\n"); + } + + public int listCachePools(String argsArray[], int idx) throws IOException { + List args = new LinkedList(); + for (int i = idx; i < argsArray.length; i++) { + args.add(argsArray[i]); + } + String name = StringUtils.popFirstNonOption(args); + if (!args.isEmpty()) { + System.err.print("Can't understand arguments: " + + Joiner.on(" ").join(args) + "\n"); + System.err.println("usage is " + LIST_CACHE_POOLS_USAGE); + return 1; + } + 
boolean gotResults = false; + DistributedFileSystem dfs = getDFS(); + try { + RemoteIterator iter = dfs.listCachePools(); + if (name != null) { + while (iter.hasNext()) { + CachePoolInfo info = iter.next(); + if (info.getPoolName().equals(name)) { + listCachePool(info); + gotResults = true; + return 0; + } + } + } else { + while (iter.hasNext()) { + listCachePool(iter.next()); + gotResults = true; + } + } + } catch (IOException e) { + throw new RemoteException(e.getClass().getName(), e.getMessage()); + } + int ret = 0; + if (!gotResults) { + if (name != null) { + System.out.println("No cache pool named " + name + " found."); + ret = 1; + } else { + System.out.println("No cache pools found."); + ret = 1; + } + } + return ret; + } + public int rollEdits() throws IOException { DistributedFileSystem dfs = getDFS(); long txid = dfs.rollEdits(); @@ -582,6 +817,10 @@ private void printHelp(String cmd) { "\t[-fetchImage ]\n" + "\t[-allowSnapshot ]\n" + "\t[-disallowSnapshot ]\n" + + "\t[" + ADD_CACHE_POOL_USAGE + "]\n" + + "\t[" + MODIFY_CACHE_POOL_USAGE + "]\n" + + "\t[" + REMOVE_CACHE_POOL_USAGE + "]\n" + + "\t[" + LIST_CACHE_POOLS_USAGE + "]\n" + "\t[-help [cmd]]\n"; String report ="-report: \tReports basic filesystem information and statistics.\n"; @@ -679,6 +918,42 @@ private void printHelp(String cmd) { String disallowSnapshot = "-disallowSnapshot :\n" + "\tDo not allow snapshots to be taken on a directory any more.\n"; + String addCachePool = ADD_CACHE_POOL_USAGE + ": \n" + + "\tAdd a new cache pool.\n" + + "\t is the name of the new pool. It must not already be used.\n" + + "\t is the owner of the pool. It defaults to the current\n" + + "\tuser name.\n" + + "\t is the group of the pool. It defaults to the primary\n" + + "\tgroup name of the current user.\n" + + "\t is the mode of the pool. This is a UNIX-style numeric mode\n" + + "\targument, supplied as an octal number. For example, mode 0755\n" + + "\tgrants the owner all permissions, and grants everyone else\n" + + "\tonly read and list permissions.\n" + + "\tThe mode defaults to " + + String.format("0%03o", + FsPermission.getCachePoolDefault().toShort()) + "\n" + + "\t is the weight of the pool. This determines what share \n" + + "\tof cluster resources the pool will get. It defaults to " + + CachePool.DEFAULT_WEIGHT + "\n"; + + String modifyCachePool = MODIFY_CACHE_POOL_USAGE + ": \n" + + "\tAdd a new cache pool with the given name.\n" + + "\t is the name of the pool to modify.\n" + + "\t is the new owner of the pool.\n" + + "\t is the new group of the pool.\n" + + "\t is the new mode of the pool.\n" + + "\t is the new weight of the pool.\n"; + + String removeCachePool = REMOVE_CACHE_POOL_USAGE + ": \n" + + "\tRemove a cache pool.\n" + + "\t is the name of the pool to remove.\n"; + + String listCachePools = " -listCachePools [-name ] [-verbose]\n" + + "\tList cache pools.\n" + + "\tIf is specified, we will list only the cache pool with\n" + + "\tthat name. 
If is specified, we will list detailed\n" + + "\tinformation about each pool\n"; + String help = "-help [cmd]: \tDisplays help for the given command or all commands if none\n" + "\t\tis specified.\n"; @@ -726,6 +1001,14 @@ private void printHelp(String cmd) { System.out.println(allowSnapshot); } else if ("disallowSnapshot".equalsIgnoreCase(cmd)) { System.out.println(disallowSnapshot); + } else if ("addCachePool".equalsIgnoreCase(cmd)) { + System.out.println(addCachePool); + } else if ("modifyCachePool".equalsIgnoreCase(cmd)) { + System.out.println(modifyCachePool); + } else if ("removeCachePool".equalsIgnoreCase(cmd)) { + System.out.println(removeCachePool); + } else if ("listCachePools".equalsIgnoreCase(cmd)) { + System.out.println(listCachePools); } else if ("help".equals(cmd)) { System.out.println(help); } else { @@ -752,6 +1035,13 @@ private void printHelp(String cmd) { System.out.println(fetchImage); System.out.println(allowSnapshot); System.out.println(disallowSnapshot); + System.out.println(addCachePool); + System.out.println(modifyCachePool); + System.out.println(removeCachePool); + System.out.println(listCachePools); + + System.out.println(disallowSnapshot); + System.out.println(help); System.out.println(); ToolRunner.printGenericCommandUsage(System.out); @@ -988,6 +1278,18 @@ private static void printUsage(String cmd) { } else if ("-fetchImage".equals(cmd)) { System.err.println("Usage: java DFSAdmin" + " [-fetchImage ]"); + } else if ("-addCachePool".equals(cmd)) { + System.err.println("Usage: java DFSAdmin" + + " [" + ADD_CACHE_POOL_USAGE + "]"); + } else if ("-modifyCachePool".equals(cmd)) { + System.err.println("Usage: java DFSAdmin" + + " [" + MODIFY_CACHE_POOL_USAGE + "]"); + } else if ("-removeCachePool".equals(cmd)) { + System.err.println("Usage: java DFSAdmin" + + " [" + REMOVE_CACHE_POOL_USAGE + "]"); + } else if ("-listCachePools".equals(cmd)) { + System.err.println("Usage: java DFSAdmin" + + " [" + LIST_CACHE_POOLS_USAGE + "]"); } else { System.err.println("Usage: java DFSAdmin"); System.err.println("Note: Administrative commands can only be run as the HDFS superuser."); @@ -1013,6 +1315,10 @@ private static void printUsage(String cmd) { System.err.println(" ["+ClearSpaceQuotaCommand.USAGE+"]"); System.err.println(" [-setBalancerBandwidth ]"); System.err.println(" [-fetchImage ]"); + System.err.println(" [" + ADD_CACHE_POOL_USAGE + "]"); + System.err.println(" [" + MODIFY_CACHE_POOL_USAGE + "]"); + System.err.println(" [" + REMOVE_CACHE_POOL_USAGE + "]"); + System.err.println(" [" + LIST_CACHE_POOLS_USAGE + "]"); System.err.println(" [-help [cmd]]"); System.err.println(); ToolRunner.printGenericCommandUsage(System.err); @@ -1185,6 +1491,14 @@ public int run(String[] argv) throws Exception { exitCode = setBalancerBandwidth(argv, i); } else if ("-fetchImage".equals(cmd)) { exitCode = fetchImage(argv, i); + } else if ("-addCachePool".equals(cmd)) { + exitCode = addCachePool(argv, i); + } else if ("-modifyCachePool".equals(cmd)) { + exitCode = modifyCachePool(argv, i); + } else if ("-removeCachePool".equals(cmd)) { + exitCode = removeCachePool(argv, i); + } else if ("-listCachePools".equals(cmd)) { + exitCode = listCachePools(argv, i); } else if ("-help".equals(cmd)) { if (i < argv.length) { printHelp(argv[i]); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml index 44d2b32f33c..bd248bc88b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml @@ -16356,7 +16356,7 @@ - + Verifying clrSpaceQuota operation is not permitted in safemode -fs NAMENODE -mkdir /test @@ -16374,5 +16374,70 @@ + + + Testing listing no cache pools + + -fs NAMENODE -listCachePools + + + + + + SubstringComparator + No cache pools found. + + + + + + Testing adding a cache pool + + -fs NAMENODE -addCachePool foo + + + -fs NAMENODE -removeCachePool foo + + + + SubstringComparator + Successfully added cache pool foo. + + + + + + Testing deleting a cache pool + + -fs NAMENODE -addCachePool foo + -fs NAMENODE -removeCachePool foo + + + + + + SubstringComparator + Successfully removed cache pool foo. + + + + + + Testing listing a cache pool + + -fs NAMENODE -addCachePool foo -owner bob -group bob -mode 0664 + -fs NAMENODE -listCachePools foo + + + -fs NAMENODE -removeCachePool foo + + + + SubstringComparator + foo + + + + From 02e0e158a26f81ce8375426ba0ea56db09ee36be Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Thu, 12 Sep 2013 03:55:10 +0000 Subject: [PATCH 15/51] HDFS-5158. Add command-line support for manipulating cache directives git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1522272 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 + .../hadoop-hdfs/src/main/bin/hdfs | 3 + .../hadoop/hdfs/DistributedFileSystem.java | 42 +++ ... AddPathBasedCacheDirectiveException.java} | 38 +- .../hadoop/hdfs/protocol/ClientProtocol.java | 25 +- ...tive.java => PathBasedCacheDirective.java} | 18 +- ...cheEntry.java => PathBasedCacheEntry.java} | 12 +- ...> RemovePathBasedCacheEntryException.java} | 24 +- ...amenodeProtocolServerSideTranslatorPB.java | 140 +++++--- .../ClientNamenodeProtocolTranslatorPB.java | 162 ++++----- .../hdfs/server/namenode/CacheManager.java | 117 +++--- .../hdfs/server/namenode/CachePool.java | 10 +- .../hdfs/server/namenode/FSNamesystem.java | 36 +- .../server/namenode/NameNodeRpcServer.java | 36 +- .../apache/hadoop/hdfs/tools/CacheAdmin.java | 333 ++++++++++++++++++ .../apache/hadoop/hdfs/tools/DFSAdmin.java | 61 ++-- .../hadoop/hdfs/tools/TableListing.java | 137 +++++++ .../main/proto/ClientNamenodeProtocol.proto | 62 ++-- ...s.java => TestPathBasedCacheRequests.java} | 106 +++--- .../src/test/resources/testHDFSConf.xml | 4 +- 20 files changed, 976 insertions(+), 393 deletions(-) rename hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/{AddPathCacheDirectiveException.java => AddPathBasedCacheDirectiveException.java} (62%) rename hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/{PathCacheDirective.java => PathBasedCacheDirective.java} (79%) rename hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/{PathCacheEntry.java => PathBasedCacheEntry.java} (85%) rename hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/{RemovePathCacheEntryException.java => RemovePathBasedCacheEntryException.java} (70%) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java rename hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/{TestPathCacheRequests.java => TestPathBasedCacheRequests.java} (70%) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index 
69d33a6927f..4a31cd644d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -30,6 +30,9 @@ HDFS-4949 (Unreleased) HDFS-5120. Add command-line support for manipulating cache pools. (Contributed by Colin Patrick McCabe) + HDFS-5158. Add command-line support for manipulating cache directives. + (Contributed by Colin Patrick McCabe) + OPTIMIZATIONS diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs index c06a5085983..fa00cd47d0e 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs @@ -59,6 +59,7 @@ function print_usage(){ echo " Use -help to see options" echo " portmap run a portmap service" echo " nfs3 run an NFS version 3 gateway" + echo " cacheadmin configure the HDFS cache" echo "" echo "Most commands print help when invoked w/o parameters." } @@ -155,6 +156,8 @@ elif [ "$COMMAND" = "portmap" ] ; then CLASS=org.apache.hadoop.portmap.Portmap elif [ "$COMMAND" = "nfs3" ] ; then CLASS=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3 +elif [ "$COMMAND" = "cacheadmin" ] ; then + CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin else CLASS="$COMMAND" fi diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index c779f889a2b..c2cdcb0afaf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -67,6 +67,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; @@ -77,6 +79,7 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.Fallible; import org.apache.hadoop.util.Progressable; import com.google.common.annotations.VisibleForTesting; @@ -1580,6 +1583,45 @@ public Boolean next(final FileSystem fs, final Path p) }.resolve(this, absF); } + /** + * Add some PathBasedCache directives. + * + * @param directives A list of PathBasedCache directives to be added. + * @return A Fallible list, where each element is either a successfully addded + * PathBasedCache entry, or an IOException describing why the directive + * could not be added. + */ + public List> + addPathBasedCacheDirective(List directives) + throws IOException { + return dfs.namenode.addPathBasedCacheDirectives(directives); + } + + /** + * Remove some PathBasedCache entries. + * + * @param ids A list of all the entry IDs to be removed. + * @return A Fallible list where each element is either a successfully removed + * ID, or an IOException describing why the ID could not be removed. 
+ */ + public List> + removePathBasedCacheEntries(List ids) throws IOException { + return dfs.namenode.removePathBasedCacheEntries(ids); + } + + /** + * List the set of cached paths of a cache pool. Incrementally fetches results + * from the server. + * + * @param pool The cache pool to list, or null to list all pools. + * @param path The path name to list, or null to list all paths. + * @return A RemoteIterator which returns PathBasedCacheEntry objects. + */ + public RemoteIterator listPathBasedCacheEntries( + String pool, String path) throws IOException { + return dfs.namenode.listPathBasedCacheEntries(0, pool, path); + } + /** * Add a cache pool. * diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathCacheDirectiveException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathBasedCacheDirectiveException.java similarity index 62% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathCacheDirectiveException.java rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathBasedCacheDirectiveException.java index e162463d8d4..457984353b6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathCacheDirectiveException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathBasedCacheDirectiveException.java @@ -20,69 +20,69 @@ import java.io.IOException; /** - * An exception which occurred when trying to add a path cache directive. + * An exception which occurred when trying to add a PathBasedCache directive. */ -public abstract class AddPathCacheDirectiveException extends IOException { +public abstract class AddPathBasedCacheDirectiveException extends IOException { private static final long serialVersionUID = 1L; - private final PathCacheDirective directive; + private final PathBasedCacheDirective directive; - public AddPathCacheDirectiveException(String description, - PathCacheDirective directive) { + public AddPathBasedCacheDirectiveException(String description, + PathBasedCacheDirective directive) { super(description); this.directive = directive; } - public PathCacheDirective getDirective() { + public PathBasedCacheDirective getDirective() { return directive; } public static final class EmptyPathError - extends AddPathCacheDirectiveException { + extends AddPathBasedCacheDirectiveException { private static final long serialVersionUID = 1L; - public EmptyPathError(PathCacheDirective directive) { + public EmptyPathError(PathBasedCacheDirective directive) { super("empty path in directive " + directive, directive); } } public static class InvalidPathNameError - extends AddPathCacheDirectiveException { + extends AddPathBasedCacheDirectiveException { private static final long serialVersionUID = 1L; - public InvalidPathNameError(PathCacheDirective directive) { + public InvalidPathNameError(PathBasedCacheDirective directive) { super("can't handle non-absolute path name " + directive.getPath(), directive); } } public static class InvalidPoolNameError - extends AddPathCacheDirectiveException { + extends AddPathBasedCacheDirectiveException { private static final long serialVersionUID = 1L; - public InvalidPoolNameError(PathCacheDirective directive) { + public InvalidPoolNameError(PathBasedCacheDirective directive) { super("invalid pool name '" + directive.getPool() + "'", directive); } } public static class PoolWritePermissionDeniedError - extends 
AddPathCacheDirectiveException { + extends AddPathBasedCacheDirectiveException { private static final long serialVersionUID = 1L; - public PoolWritePermissionDeniedError(PathCacheDirective directive) { + public PoolWritePermissionDeniedError(PathBasedCacheDirective directive) { super("write permission denied for pool '" + directive.getPool() + "'", directive); } } - public static class UnexpectedAddPathCacheDirectiveException - extends AddPathCacheDirectiveException { + public static class UnexpectedAddPathBasedCacheDirectiveException + extends AddPathBasedCacheDirectiveException { private static final long serialVersionUID = 1L; - public UnexpectedAddPathCacheDirectiveException( - PathCacheDirective directive) { + public UnexpectedAddPathBasedCacheDirectiveException( + PathBasedCacheDirective directive) { super("encountered an unexpected error when trying to " + - "add path cache directive " + directive, directive); + "add PathBasedCache directive " + directive, directive); } } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index cc31c397c1d..50f645ed657 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -1098,27 +1098,27 @@ public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, String fromSnapshot, String toSnapshot) throws IOException; /** - * Add some path cache directives to the CacheManager. + * Add some PathBasedCache directives to the CacheManager. * - * @param directives A list of path cache directives to be added. + * @param directives A list of PathBasedCache directives to be added. * @return A Fallible list, where each element is either a successfully addded - * path cache entry, or an IOException describing why the directive + * PathBasedCache entry, or an IOException describing why the directive * could not be added. */ @AtMostOnce - public List> - addPathCacheDirectives(List directives) + public List> + addPathBasedCacheDirectives(List directives) throws IOException; /** - * Remove some path cache entries from the CacheManager. + * Remove some PathBasedCache entries from the CacheManager. * * @param ids A list of all the entry IDs to be removed from the CacheManager. * @return A Fallible list where each element is either a successfully removed * ID, or an IOException describing why the ID could not be removed. */ @AtMostOnce - public List> removePathCacheEntries(List ids) + public List> removePathBasedCacheEntries(List ids) throws IOException; /** @@ -1126,13 +1126,14 @@ public List> removePathCacheEntries(List ids) * from the server. * * @param prevId The last listed entry ID, or -1 if this is the first call to - * listPathCacheEntries. - * @param pool The cache pool to list, or the empty string to list all pools - * @return A RemoteIterator which returns PathCacheEntry objects. + * listPathBasedCacheEntries. + * @param pool The cache pool to list, or null to list all pools. + * @param path The path name to list, or null to list all paths. + * @return A RemoteIterator which returns PathBasedCacheEntry objects. 
*/ @Idempotent - public RemoteIterator listPathCacheEntries(long prevId, - String pool) throws IOException; + public RemoteIterator listPathBasedCacheEntries(long prevId, + String pool, String path) throws IOException; /** * Add a new cache pool. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheDirective.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java similarity index 79% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheDirective.java rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java index 8c6d742d4cd..c6ac9c8ed05 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheDirective.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java @@ -24,19 +24,19 @@ import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.EmptyPathError; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPathNameError; /** * A directive to add a path to a cache pool. */ -public class PathCacheDirective implements Comparable { +public class PathBasedCacheDirective implements Comparable { private final String path; private final String pool; - public PathCacheDirective(String path, String pool) { + public PathBasedCacheDirective(String path, String pool) { Preconditions.checkNotNull(path); Preconditions.checkNotNull(pool); this.path = path; @@ -58,10 +58,10 @@ public String getPool() { } /** - * Check if this PathCacheDirective is valid. + * Check if this PathBasedCacheDirective is valid. * * @throws IOException - * If this PathCacheDirective is not valid. + * If this PathBasedCacheDirective is not valid. */ public void validate() throws IOException { if (path.isEmpty()) { @@ -76,7 +76,7 @@ public void validate() throws IOException { } @Override - public int compareTo(PathCacheDirective rhs) { + public int compareTo(PathBasedCacheDirective rhs) { return ComparisonChain.start(). compare(pool, rhs.getPool()). compare(path, rhs.getPath()). 
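(Illustrative sketch, not part of the patch: generic type parameters are stripped in the hunks as rendered here, so the signatures below are inferred from the ClientProtocol and DistributedFileSystem changes in this commit, and the pool and path names are invented. It shows the intended client flow through the new DistributedFileSystem wrappers: add a directive, list the resulting entries for a pool, then remove one by id.)

    import java.io.IOException;
    import java.util.Arrays;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
    import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry;
    import org.apache.hadoop.util.Fallible;

    public class PathBasedCacheClientSketch {
      // `dfs` is assumed to be an already-open DistributedFileSystem.
      public static void cacheAndList(DistributedFileSystem dfs) throws IOException {
        PathBasedCacheDirective directive =
            new PathBasedCacheDirective("/warm/part-00000", "foo");
        // Each Fallible holds either the new entry or the IOException that
        // explains why that particular directive was rejected.
        Fallible<PathBasedCacheEntry> result =
            dfs.addPathBasedCacheDirective(Arrays.asList(directive)).get(0);
        long id = result.get().getEntryId();   // throws if this add failed

        // List everything cached in pool "foo"; null path means "all paths".
        RemoteIterator<PathBasedCacheEntry> it =
            dfs.listPathBasedCacheEntries("foo", null);
        while (it.hasNext()) {
          PathBasedCacheEntry entry = it.next();
          System.out.println(entry.getEntryId() + " " + entry.getDirective().getPath());
        }

        dfs.removePathBasedCacheEntries(Arrays.asList(id));
      }
    }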
@@ -91,7 +91,7 @@ public int hashCode() { @Override public boolean equals(Object o) { try { - PathCacheDirective other = (PathCacheDirective)o; + PathBasedCacheDirective other = (PathBasedCacheDirective)o; return other.compareTo(this) == 0; } catch (ClassCastException e) { return false; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheEntry.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java similarity index 85% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheEntry.java rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java index 62b8b0968b5..7640c903373 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathCacheEntry.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java @@ -23,13 +23,13 @@ import com.google.common.base.Preconditions; /** - * An entry in the NameNode's path cache. + * An entry in the NameNode's PathBasedCache. */ -public final class PathCacheEntry { +public final class PathBasedCacheEntry { private final long entryId; - private final PathCacheDirective directive; + private final PathBasedCacheDirective directive; - public PathCacheEntry(long entryId, PathCacheDirective directive) { + public PathBasedCacheEntry(long entryId, PathBasedCacheDirective directive) { Preconditions.checkArgument(entryId > 0); this.entryId = entryId; this.directive = directive; @@ -39,14 +39,14 @@ public long getEntryId() { return entryId; } - public PathCacheDirective getDirective() { + public PathBasedCacheDirective getDirective() { return directive; } @Override public boolean equals(Object o) { try { - PathCacheEntry other = (PathCacheEntry)o; + PathBasedCacheEntry other = (PathBasedCacheEntry)o; return new EqualsBuilder(). append(this.entryId, other.entryId). append(this.directive, other.directive). diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathCacheEntryException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathBasedCacheEntryException.java similarity index 70% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathCacheEntryException.java rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathBasedCacheEntryException.java index 04e88dfe6c9..e657d410c9d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathCacheEntryException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathBasedCacheEntryException.java @@ -22,14 +22,14 @@ import com.google.common.base.Preconditions; /** - * An exception which occurred when trying to remove a path cache entry. + * An exception which occurred when trying to remove a PathBasedCache entry. 
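(Illustrative sketch, not part of the patch: one way a caller might distinguish the removal failures modeled by the exception subclasses renamed below. The behavior of Fallible.get() re-throwing the per-id error is inferred from the protocol translator hunks further down, so treat this as an assumption rather than a documented contract.)

    import java.io.IOException;
    import java.util.Arrays;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.NoSuchIdException;
    import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.RemovePermissionDeniedException;
    import org.apache.hadoop.util.Fallible;

    public class RemoveEntrySketch {
      public static void removeQuietly(DistributedFileSystem dfs, long id)
          throws IOException {
        Fallible<Long> result =
            dfs.removePathBasedCacheEntries(Arrays.asList(id)).get(0);
        try {
          result.get();                 // re-throws the per-id failure, if any
        } catch (NoSuchIdException e) {
          // the entry was already gone; treat as success
        } catch (RemovePermissionDeniedException e) {
          throw e;                      // caller lacks WRITE on the owning pool
        }
        // InvalidIdException and the "unexpected" wrapper propagate as IOException.
      }
    }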
*/ -public abstract class RemovePathCacheEntryException extends IOException { +public abstract class RemovePathBasedCacheEntryException extends IOException { private static final long serialVersionUID = 1L; private final long entryId; - public RemovePathCacheEntryException(String description, long entryId) { + public RemovePathBasedCacheEntryException(String description, long entryId) { super(description); this.entryId = entryId; } @@ -39,7 +39,7 @@ public long getEntryId() { } public final static class InvalidIdException - extends RemovePathCacheEntryException { + extends RemovePathBasedCacheEntryException { private static final long serialVersionUID = 1L; public InvalidIdException(long entryId) { @@ -48,31 +48,31 @@ public InvalidIdException(long entryId) { } public final static class RemovePermissionDeniedException - extends RemovePathCacheEntryException { + extends RemovePathBasedCacheEntryException { private static final long serialVersionUID = 1L; public RemovePermissionDeniedException(long entryId) { - super("permission denied when trying to remove path cache entry id " + + super("permission denied when trying to remove PathBasedCache entry id " + entryId, entryId); } } public final static class NoSuchIdException - extends RemovePathCacheEntryException { + extends RemovePathBasedCacheEntryException { private static final long serialVersionUID = 1L; public NoSuchIdException(long entryId) { - super("there is no path cache entry with id " + entryId, entryId); + super("there is no PathBasedCache entry with id " + entryId, entryId); } } - public final static class UnexpectedRemovePathCacheEntryException - extends RemovePathCacheEntryException { + public final static class UnexpectedRemovePathBasedCacheEntryException + extends RemovePathBasedCacheEntryException { private static final long serialVersionUID = 1L; - public UnexpectedRemovePathCacheEntryException(long id) { + public UnexpectedRemovePathBasedCacheEntryException(long id) { super("encountered an unexpected error when trying to " + - "remove path cache entry id " + id, id); + "remove PathBasedCache entry id " + id, id); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 857b36faa51..a58e3d95e6e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -28,9 +28,10 @@ import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.EmptyPathError; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPathNameError; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PoolWritePermissionDeniedError; import 
org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; @@ -38,11 +39,11 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.protocol.PathCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathCacheEntry; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.InvalidIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.NoSuchIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.RemovePermissionDeniedException; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto; @@ -51,9 +52,9 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectiveErrorProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectivesRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectivesResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectiveErrorProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectivesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectivesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto; @@ -114,25 +115,25 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesElementProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheEntriesElementProto; +import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheEntriesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheEntriesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathBasedCacheDirectiveProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntryErrorProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheEntriesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheEntriesResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheEntryErrorProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto; @@ -174,7 +175,6 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.namenode.CachePool; import org.apache.hadoop.hdfs.server.namenode.INodeId; -import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto; @@ -1039,34 +1039,39 @@ public IsFileClosedResponseProto isFileClosed( } @Override - public AddPathCacheDirectivesResponseProto addPathCacheDirectives(RpcController controller, - AddPathCacheDirectivesRequestProto request) throws ServiceException { + public AddPathBasedCacheDirectivesResponseProto addPathBasedCacheDirectives(RpcController controller, + AddPathBasedCacheDirectivesRequestProto request) throws ServiceException { try { - ArrayList input = - new 
ArrayList(request.getElementsCount()); + ArrayList input = + new ArrayList(request.getElementsCount()); for (int i = 0; i < request.getElementsCount(); i++) { - PathCacheDirectiveProto proto = request.getElements(i); - input.add(new PathCacheDirective(proto.getPath(), proto.getPool())); + PathBasedCacheDirectiveProto proto = request.getElements(i); + input.add(new PathBasedCacheDirective(proto.getPath(), proto.getPool())); } - List> output = server.addPathCacheDirectives(input); - AddPathCacheDirectivesResponseProto.Builder builder = - AddPathCacheDirectivesResponseProto.newBuilder(); + List> output = server.addPathBasedCacheDirectives(input); + AddPathBasedCacheDirectivesResponseProto.Builder builder = + AddPathBasedCacheDirectivesResponseProto.newBuilder(); for (int idx = 0; idx < output.size(); idx++) { try { - PathCacheEntry entry = output.get(idx).get(); + PathBasedCacheEntry entry = output.get(idx).get(); builder.addResults(entry.getEntryId()); - } catch (EmptyPathError ioe) { - builder.addResults(AddPathCacheDirectiveErrorProto. - EMPTY_PATH_ERROR_VALUE); - } catch (InvalidPathNameError ioe) { - builder.addResults(AddPathCacheDirectiveErrorProto. - INVALID_PATH_NAME_ERROR_VALUE); - } catch (InvalidPoolNameError ioe) { - builder.addResults(AddPathCacheDirectiveErrorProto. - INVALID_POOL_NAME_ERROR_VALUE); } catch (IOException ioe) { - builder.addResults(AddPathCacheDirectiveErrorProto. - UNEXPECTED_ADD_ERROR_VALUE); + if (ioe.getCause() instanceof EmptyPathError) { + builder.addResults(AddPathBasedCacheDirectiveErrorProto. + EMPTY_PATH_ERROR_VALUE); + } else if (ioe.getCause() instanceof InvalidPathNameError) { + builder.addResults(AddPathBasedCacheDirectiveErrorProto. + INVALID_PATH_NAME_ERROR_VALUE); + } else if (ioe.getCause() instanceof InvalidPoolNameError) { + builder.addResults(AddPathBasedCacheDirectiveErrorProto. + INVALID_POOL_NAME_ERROR_VALUE); + } else if (ioe.getCause() instanceof PoolWritePermissionDeniedError) { + builder.addResults(AddPathBasedCacheDirectiveErrorProto. + ADD_PERMISSION_DENIED_ERROR_VALUE); + } else { + builder.addResults(AddPathBasedCacheDirectiveErrorProto. + UNEXPECTED_ADD_ERROR_VALUE); + } } } return builder.build(); @@ -1076,29 +1081,29 @@ public AddPathCacheDirectivesResponseProto addPathCacheDirectives(RpcController } @Override - public RemovePathCacheEntriesResponseProto removePathCacheEntries( - RpcController controller, RemovePathCacheEntriesRequestProto request) + public RemovePathBasedCacheEntriesResponseProto removePathBasedCacheEntries( + RpcController controller, RemovePathBasedCacheEntriesRequestProto request) throws ServiceException { try { List> output = - server.removePathCacheEntries(request.getElementsList()); - RemovePathCacheEntriesResponseProto.Builder builder = - RemovePathCacheEntriesResponseProto.newBuilder(); + server.removePathBasedCacheEntries(request.getElementsList()); + RemovePathBasedCacheEntriesResponseProto.Builder builder = + RemovePathBasedCacheEntriesResponseProto.newBuilder(); for (int idx = 0; idx < output.size(); idx++) { try { long id = output.get(idx).get(); builder.addResults(id); } catch (InvalidIdException ioe) { - builder.addResults(RemovePathCacheEntryErrorProto. + builder.addResults(RemovePathBasedCacheEntryErrorProto. INVALID_CACHED_PATH_ID_ERROR_VALUE); } catch (NoSuchIdException ioe) { - builder.addResults(RemovePathCacheEntryErrorProto. + builder.addResults(RemovePathBasedCacheEntryErrorProto. 
NO_SUCH_CACHED_PATH_ID_ERROR_VALUE); } catch (RemovePermissionDeniedException ioe) { - builder.addResults(RemovePathCacheEntryErrorProto. + builder.addResults(RemovePathBasedCacheEntryErrorProto. REMOVE_PERMISSION_DENIED_ERROR_VALUE); } catch (IOException ioe) { - builder.addResults(RemovePathCacheEntryErrorProto. + builder.addResults(RemovePathBasedCacheEntryErrorProto. UNEXPECTED_REMOVE_ERROR_VALUE); } } @@ -1109,20 +1114,32 @@ public RemovePathCacheEntriesResponseProto removePathCacheEntries( } @Override - public ListPathCacheEntriesResponseProto listPathCacheEntries(RpcController controller, - ListPathCacheEntriesRequestProto request) throws ServiceException { + public ListPathBasedCacheEntriesResponseProto listPathBasedCacheEntries( + RpcController controller, ListPathBasedCacheEntriesRequestProto request) + throws ServiceException { try { - RemoteIterator iter = - server.listPathCacheEntries(request.getPrevId(), request.getPool()); - ListPathCacheEntriesResponseProto.Builder builder = - ListPathCacheEntriesResponseProto.newBuilder(); + RemoteIterator iter = + server.listPathBasedCacheEntries(request.getPrevId(), + request.hasPool() ? request.getPool() : null, + request.hasPath() ? request.getPath() : null); + ListPathBasedCacheEntriesResponseProto.Builder builder = + ListPathBasedCacheEntriesResponseProto.newBuilder(); + long prevId = 0; while (iter.hasNext()) { - PathCacheEntry entry = iter.next(); + PathBasedCacheEntry entry = iter.next(); builder.addElements( - ListPathCacheEntriesElementProto.newBuilder(). + ListPathBasedCacheEntriesElementProto.newBuilder(). setId(entry.getEntryId()). setPath(entry.getDirective().getPath()). setPool(entry.getDirective().getPool())); + prevId = entry.getEntryId(); + } + if (prevId == 0) { + builder.setHasMore(false); + } else { + iter = server.listPathBasedCacheEntries(prevId, request.getPool(), + request.getPath()); + builder.setHasMore(iter.hasNext()); } return builder.build(); } catch (IOException e) { @@ -1199,6 +1216,7 @@ public ListCachePoolsResponseProto listCachePools(RpcController controller, server.listCachePools(request.getPrevPoolName()); ListCachePoolsResponseProto.Builder responseBuilder = ListCachePoolsResponseProto.newBuilder(); + String prevPoolName = null; while (iter.hasNext()) { CachePoolInfo pool = iter.next(); ListCachePoolsResponseElementProto.Builder elemBuilder = @@ -1217,6 +1235,14 @@ public ListCachePoolsResponseProto listCachePools(RpcController controller, elemBuilder.setWeight(pool.getWeight()); } responseBuilder.addElements(elemBuilder.build()); + prevPoolName = pool.getPoolName(); + } + // fill in hasNext + if (prevPoolName == null) { + responseBuilder.setHasMore(false); + } else { + iter = server.listCachePools(prevPoolName); + responseBuilder.setHasMore(iter.hasNext()); } return responseBuilder.build(); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 53912f7a14f..ce59605c9a5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -23,7 +23,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.NoSuchElementException; import 
org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -39,16 +38,16 @@ import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; -import org.apache.hadoop.hdfs.protocol.PathCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathCacheEntry; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.UnexpectedAddPathCacheDirectiveException; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.EmptyPathError; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPathNameError; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.UnexpectedAddPathBasedCacheDirectiveException; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.InvalidIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.NoSuchIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.RemovePermissionDeniedException; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.UnexpectedRemovePathBasedCacheEntryException; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; @@ -68,10 +67,10 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectiveErrorProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectivesRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectivesResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathBasedCacheDirectiveProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectiveErrorProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectivesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectivesResponseProto; import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto; @@ -109,10 +108,10 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesElementProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheEntriesElementProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheEntriesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheEntriesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheEntriesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto; @@ -122,9 +121,9 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntryErrorProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheEntriesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheEntriesResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheEntryErrorProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto; @@ -1018,47 +1017,47 @@ public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, } } - private static IOException addPathCacheDirectivesError(long code, - PathCacheDirective directive) { - if (code == AddPathCacheDirectiveErrorProto.EMPTY_PATH_ERROR_VALUE) { + private static IOException addPathBasedCacheDirectivesError(long code, + PathBasedCacheDirective directive) { + if (code == 
AddPathBasedCacheDirectiveErrorProto.EMPTY_PATH_ERROR_VALUE) { return new EmptyPathError(directive); - } else if (code == AddPathCacheDirectiveErrorProto. + } else if (code == AddPathBasedCacheDirectiveErrorProto. INVALID_PATH_NAME_ERROR_VALUE) { return new InvalidPathNameError(directive); - } else if (code == AddPathCacheDirectiveErrorProto. + } else if (code == AddPathBasedCacheDirectiveErrorProto. INVALID_POOL_NAME_ERROR_VALUE) { return new InvalidPoolNameError(directive); } else { - return new UnexpectedAddPathCacheDirectiveException(directive); + return new UnexpectedAddPathBasedCacheDirectiveException(directive); } } @Override - public List> addPathCacheDirectives( - List directives) throws IOException { + public List> addPathBasedCacheDirectives( + List directives) throws IOException { try { - AddPathCacheDirectivesRequestProto.Builder builder = - AddPathCacheDirectivesRequestProto.newBuilder(); - for (PathCacheDirective directive : directives) { - builder.addElements(PathCacheDirectiveProto.newBuilder(). + AddPathBasedCacheDirectivesRequestProto.Builder builder = + AddPathBasedCacheDirectivesRequestProto.newBuilder(); + for (PathBasedCacheDirective directive : directives) { + builder.addElements(PathBasedCacheDirectiveProto.newBuilder(). setPath(directive.getPath()). setPool(directive.getPool()). build()); } - AddPathCacheDirectivesResponseProto result = - rpcProxy.addPathCacheDirectives(null, builder.build()); + AddPathBasedCacheDirectivesResponseProto result = + rpcProxy.addPathBasedCacheDirectives(null, builder.build()); int resultsCount = result.getResultsCount(); - ArrayList> results = - new ArrayList>(resultsCount); + ArrayList> results = + new ArrayList>(resultsCount); for (int i = 0; i < resultsCount; i++) { - PathCacheDirective directive = directives.get(i); + PathBasedCacheDirective directive = directives.get(i); long code = result.getResults(i); if (code > 0) { - results.add(new Fallible( - new PathCacheEntry(code, directive))); + results.add(new Fallible( + new PathBasedCacheEntry(code, directive))); } else { - results.add(new Fallible( - addPathCacheDirectivesError(code, directive))); + results.add(new Fallible( + addPathBasedCacheDirectivesError(code, directive))); } } return results; @@ -1067,32 +1066,32 @@ public List> addPathCacheDirectives( } } - private static IOException removePathCacheEntriesError(long code, long id) { - if (code == RemovePathCacheEntryErrorProto. + private static IOException removePathBasedCacheEntriesError(long code, long id) { + if (code == RemovePathBasedCacheEntryErrorProto. INVALID_CACHED_PATH_ID_ERROR_VALUE) { return new InvalidIdException(id); - } else if (code == RemovePathCacheEntryErrorProto. + } else if (code == RemovePathBasedCacheEntryErrorProto. NO_SUCH_CACHED_PATH_ID_ERROR_VALUE) { return new NoSuchIdException(id); - } else if (code == RemovePathCacheEntryErrorProto. + } else if (code == RemovePathBasedCacheEntryErrorProto. 
REMOVE_PERMISSION_DENIED_ERROR_VALUE) { return new RemovePermissionDeniedException(id); } else { - return new UnexpectedRemovePathCacheEntryException(id); + return new UnexpectedRemovePathBasedCacheEntryException(id); } } @Override - public List> removePathCacheEntries(List ids) + public List> removePathBasedCacheEntries(List ids) throws IOException { try { - RemovePathCacheEntriesRequestProto.Builder builder = - RemovePathCacheEntriesRequestProto.newBuilder(); + RemovePathBasedCacheEntriesRequestProto.Builder builder = + RemovePathBasedCacheEntriesRequestProto.newBuilder(); for (Long id : ids) { builder.addElements(id); } - RemovePathCacheEntriesResponseProto result = - rpcProxy.removePathCacheEntries(null, builder.build()); + RemovePathBasedCacheEntriesResponseProto result = + rpcProxy.removePathBasedCacheEntries(null, builder.build()); int resultsCount = result.getResultsCount(); ArrayList> results = new ArrayList>(resultsCount); @@ -1102,7 +1101,7 @@ public List> removePathCacheEntries(List ids) results.add(new Fallible(code)); } else { results.add(new Fallible( - removePathCacheEntriesError(code, ids.get(i)))); + removePathBasedCacheEntriesError(code, ids.get(i)))); } } return results; @@ -1111,20 +1110,20 @@ public List> removePathCacheEntries(List ids) } } - private static class BatchedPathCacheEntries - implements BatchedEntries { - private ListPathCacheEntriesResponseProto response; + private static class BatchedPathBasedCacheEntries + implements BatchedEntries { + private ListPathBasedCacheEntriesResponseProto response; - BatchedPathCacheEntries(ListPathCacheEntriesResponseProto response) { + BatchedPathBasedCacheEntries(ListPathBasedCacheEntriesResponseProto response) { this.response = response; } @Override - public PathCacheEntry get(int i) { - ListPathCacheEntriesElementProto elementProto = + public PathBasedCacheEntry get(int i) { + ListPathBasedCacheEntriesElementProto elementProto = response.getElements(i); - return new PathCacheEntry(elementProto.getId(), - new PathCacheDirective(elementProto.getPath(), + return new PathBasedCacheEntry(elementProto.getId(), + new PathBasedCacheDirective(elementProto.getPath(), elementProto.getPool())); } @@ -1139,45 +1138,48 @@ public boolean hasMore() { } } - private class PathCacheEntriesIterator - extends BatchedRemoteIterator { + private class PathBasedCacheEntriesIterator + extends BatchedRemoteIterator { private final String pool; + private final String path; - public PathCacheEntriesIterator(long prevKey, String pool) { + public PathBasedCacheEntriesIterator(long prevKey, String pool, String path) { super(prevKey); this.pool = pool; + this.path = path; } @Override - public BatchedEntries makeRequest( + public BatchedEntries makeRequest( Long nextKey) throws IOException { - ListPathCacheEntriesResponseProto response; + ListPathBasedCacheEntriesResponseProto response; try { - ListPathCacheEntriesRequestProto req = - ListPathCacheEntriesRequestProto.newBuilder(). - setPrevId(nextKey). - setPool(pool). 
- build(); - response = rpcProxy.listPathCacheEntries(null, req); - if (response.getElementsCount() == 0) { - response = null; + ListPathBasedCacheEntriesRequestProto.Builder builder = + ListPathBasedCacheEntriesRequestProto.newBuilder().setPrevId(nextKey); + if (pool != null) { + builder.setPool(pool); } + if (path != null) { + builder.setPath(path); + } + ListPathBasedCacheEntriesRequestProto req = builder.build(); + response = rpcProxy.listPathBasedCacheEntries(null, req); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } - return new BatchedPathCacheEntries(response); + return new BatchedPathBasedCacheEntries(response); } @Override - public Long elementToPrevKey(PathCacheEntry element) { + public Long elementToPrevKey(PathBasedCacheEntry element) { return element.getEntryId(); } } @Override - public RemoteIterator listPathCacheEntries(long prevId, - String pool) throws IOException { - return new PathCacheEntriesIterator(prevId, pool); + public RemoteIterator listPathBasedCacheEntries(long prevId, + String pool, String path) throws IOException { + return new PathBasedCacheEntriesIterator(prevId, pool, path); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index b71e4d0d369..471defac1ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -35,16 +35,17 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; -import org.apache.hadoop.hdfs.protocol.PathCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathCacheEntry; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.UnexpectedAddPathCacheDirectiveException; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.PoolWritePermissionDeniedError; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.UnexpectedAddPathBasedCacheDirectiveException; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PoolWritePermissionDeniedError; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.InvalidIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.NoSuchIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.UnexpectedRemovePathBasedCacheEntryException; +import 
org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.RemovePermissionDeniedException; import org.apache.hadoop.util.Fallible; /** @@ -56,17 +57,17 @@ final class CacheManager { /** * Cache entries, sorted by ID. * - * listPathCacheEntries relies on the ordering of elements in this map + * listPathBasedCacheEntries relies on the ordering of elements in this map * to track what has already been listed by the client. */ - private final TreeMap entriesById = - new TreeMap(); + private final TreeMap entriesById = + new TreeMap(); /** * Cache entries, sorted by directive. */ - private final TreeMap entriesByDirective = - new TreeMap(); + private final TreeMap entriesByDirective = + new TreeMap(); /** * Cache pools, sorted by name. @@ -114,53 +115,53 @@ synchronized long getNextEntryId() throws IOException { return nextEntryId++; } - private synchronized Fallible addDirective( - PathCacheDirective directive, FSPermissionChecker pc) { + private synchronized Fallible addDirective( + PathBasedCacheDirective directive, FSPermissionChecker pc) { CachePool pool = cachePools.get(directive.getPool()); if (pool == null) { LOG.info("addDirective " + directive + ": pool not found."); - return new Fallible( + return new Fallible( new InvalidPoolNameError(directive)); } if ((pc != null) && (!pc.checkPermission(pool, FsAction.WRITE))) { LOG.info("addDirective " + directive + ": write permission denied."); - return new Fallible( + return new Fallible( new PoolWritePermissionDeniedError(directive)); } try { directive.validate(); } catch (IOException ioe) { LOG.info("addDirective " + directive + ": validation failed."); - return new Fallible(ioe); + return new Fallible(ioe); } // Check if we already have this entry. - PathCacheEntry existing = entriesByDirective.get(directive); + PathBasedCacheEntry existing = entriesByDirective.get(directive); if (existing != null) { // Entry already exists: return existing entry. LOG.info("addDirective " + directive + ": there is an " + "existing directive " + existing); - return new Fallible(existing); + return new Fallible(existing); } // Add a new entry with the next available ID. - PathCacheEntry entry; + PathBasedCacheEntry entry; try { - entry = new PathCacheEntry(getNextEntryId(), directive); + entry = new PathBasedCacheEntry(getNextEntryId(), directive); } catch (IOException ioe) { - return new Fallible( - new UnexpectedAddPathCacheDirectiveException(directive)); + return new Fallible( + new UnexpectedAddPathBasedCacheDirectiveException(directive)); } LOG.info("addDirective " + directive + ": added cache directive " + directive); entriesByDirective.put(directive, entry); entriesById.put(entry.getEntryId(), entry); - return new Fallible(entry); + return new Fallible(entry); } - public synchronized List> addDirectives( - List directives, FSPermissionChecker pc) { - ArrayList> results = - new ArrayList>(directives.size()); - for (PathCacheDirective directive: directives) { + public synchronized List> addDirectives( + List directives, FSPermissionChecker pc) { + ArrayList> results = + new ArrayList>(directives.size()); + for (PathBasedCacheDirective directive: directives) { results.add(addDirective(directive, pc)); } return results; @@ -174,7 +175,7 @@ private synchronized Fallible removeEntry(long entryId, return new Fallible(new InvalidIdException(entryId)); } // Find the entry. 
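(Orientation sketch, not code from the patch: the listPathBasedCacheEntries hunk a little further below resumes its scan with entriesById.tailMap(prevId + 1) and caps each batch at maxListCacheDirectivesResponses. The self-contained example below illustrates that paging pattern in isolation; the class and field names are invented.)

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.SortedMap;
    import java.util.TreeMap;

    /** Resume an id-ordered scan strictly after the last id the client saw. */
    public class TailMapPagingSketch {
      private final TreeMap<Long, String> byId = new TreeMap<Long, String>();

      public List<String> listAfter(long prevId, int maxReplies) {
        List<String> replies = new ArrayList<String>();
        // tailMap(prevId + 1) skips everything the client has already seen.
        SortedMap<Long, String> tail = byId.tailMap(prevId + 1);
        for (Map.Entry<Long, String> cur : tail.entrySet()) {
          if (replies.size() >= maxReplies) {
            break;              // more results remain; the client resumes later
          }
          replies.add(cur.getValue());
        }
        return replies;
      }
    }

A client that received a full page simply calls listAfter again, passing the largest id it has seen so far, which is the same role prevId plays in the RPC above.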
- PathCacheEntry existing = entriesById.get(entryId); + PathBasedCacheEntry existing = entriesById.get(entryId); if (existing == null) { LOG.info("removeEntry " + entryId + ": entry not found."); return new Fallible(new NoSuchIdException(entryId)); @@ -184,7 +185,7 @@ private synchronized Fallible removeEntry(long entryId, LOG.info("removeEntry " + entryId + ": pool not found for directive " + existing.getDirective()); return new Fallible( - new UnexpectedRemovePathCacheEntryException(entryId)); + new UnexpectedRemovePathBasedCacheEntryException(entryId)); } if ((pc != null) && (!pc.checkPermission(pool, FsAction.WRITE))) { LOG.info("removeEntry " + entryId + ": write permission denied to " + @@ -198,7 +199,7 @@ private synchronized Fallible removeEntry(long entryId, LOG.warn("removeEntry " + entryId + ": failed to find existing entry " + existing + " in entriesByDirective"); return new Fallible( - new UnexpectedRemovePathCacheEntryException(entryId)); + new UnexpectedRemovePathBasedCacheEntryException(entryId)); } entriesById.remove(entryId); return new Fallible(entryId); @@ -214,33 +215,44 @@ public synchronized List> removeEntries(List entryIds, return results; } - public synchronized BatchedListEntries - listPathCacheEntries(long prevId, String filterPool, FSPermissionChecker pc) { + public synchronized BatchedListEntries + listPathBasedCacheEntries(long prevId, String filterPool, + String filterPath, FSPermissionChecker pc) throws IOException { final int NUM_PRE_ALLOCATED_ENTRIES = 16; - ArrayList replies = - new ArrayList(NUM_PRE_ALLOCATED_ENTRIES); - int numReplies = 0; - SortedMap tailMap = entriesById.tailMap(prevId + 1); - for (Entry cur : tailMap.entrySet()) { - if (numReplies >= maxListCacheDirectivesResponses) { - return new BatchedListEntries(replies, true); + if (filterPath != null) { + if (!DFSUtil.isValidName(filterPath)) { + throw new IOException("invalid path name '" + filterPath + "'"); } - PathCacheEntry curEntry = cur.getValue(); - if (!filterPool.isEmpty() && - !cur.getValue().getDirective().getPool().equals(filterPool)) { + } + ArrayList replies = + new ArrayList(NUM_PRE_ALLOCATED_ENTRIES); + int numReplies = 0; + SortedMap tailMap = entriesById.tailMap(prevId + 1); + for (Entry cur : tailMap.entrySet()) { + if (numReplies >= maxListCacheDirectivesResponses) { + return new BatchedListEntries(replies, true); + } + PathBasedCacheEntry curEntry = cur.getValue(); + PathBasedCacheDirective directive = cur.getValue().getDirective(); + if (filterPool != null && + !directive.getPool().equals(filterPool)) { + continue; + } + if (filterPath != null && + !directive.getPath().equals(filterPath)) { continue; } CachePool pool = cachePools.get(curEntry.getDirective().getPool()); if (pool == null) { - LOG.error("invalid pool for PathCacheEntry " + curEntry); + LOG.error("invalid pool for PathBasedCacheEntry " + curEntry); continue; } - if (pc.checkPermission(pool, FsAction.EXECUTE)) { + if (pc.checkPermission(pool, FsAction.READ)) { replies.add(cur.getValue()); numReplies++; } } - return new BatchedListEntries(replies, false); + return new BatchedListEntries(replies, false); } /** @@ -300,8 +312,7 @@ public synchronized void modifyCachePool(CachePoolInfo info) } if (info.getMode() != null) { pool.setMode(info.getMode()); - bld.append(prefix). 
- append(String.format("set mode to 0%3o", info.getMode())); + bld.append(prefix).append("set mode to " + info.getMode()); prefix = "; "; } if (info.getWeight() != null) { @@ -334,10 +345,10 @@ public synchronized void removeCachePool(String poolName) // Remove entries using this pool // TODO: could optimize this somewhat to avoid the need to iterate // over all entries in entriesByDirective - Iterator> iter = + Iterator> iter = entriesByDirective.entrySet().iterator(); while (iter.hasNext()) { - Entry entry = iter.next(); + Entry entry = iter.next(); if (entry.getKey().getPool().equals(poolName)) { entriesById.remove(entry.getValue().getEntryId()); iter.remove(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java index 36ebd402e41..d645c8270d3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java @@ -51,6 +51,14 @@ public final class CachePool { @Nonnull private String groupName; + /** + * Cache pool permissions. + * + * READ permission means that you can list the cache directives in this pool. + * WRITE permission means that you can add, remove, or modify cache directives + * in this pool. + * EXECUTE permission is unused. + */ @Nonnull private FsPermission mode; @@ -74,7 +82,7 @@ public CachePool(String poolName, String ownerName, String groupName, } this.groupName = ugi.getPrimaryGroupName(); } else { - this.groupName = ownerName; + this.groupName = groupName; } this.mode = mode != null ? new FsPermission(mode): FsPermission.getCachePoolDefault(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 42bc6205c31..720a5a96db1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -143,8 +143,8 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.PathCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathCacheEntry; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -6750,27 +6750,27 @@ void removeSnapshottableDirs(List toRemove) { } @SuppressWarnings("unchecked") - List> addPathCacheDirectives( - List directives) throws IOException { + List> addPathBasedCacheDirectives( + List directives) throws IOException { CacheEntryWithPayload retryCacheEntry = RetryCache.waitForCompletion(retryCache, null); if (retryCacheEntry != null && retryCacheEntry.isSuccess()) { - return (List>) retryCacheEntry.getPayload(); + return (List>) retryCacheEntry.getPayload(); } final FSPermissionChecker pc = isPermissionEnabled ? 
getPermissionChecker() : null; boolean success = false; - List> results = null; + List> results = null; checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); if (isInSafeMode()) { throw new SafeModeException( - "Cannot add path cache directive", safeMode); + "Cannot add PathBasedCache directive", safeMode); } results = cacheManager.addDirectives(directives, pc); - //getEditLog().logAddPathCacheDirectives(results); FIXME: HDFS-5119 + //getEditLog().logAddPathBasedCacheDirectives(results); FIXME: HDFS-5119 success = true; } finally { writeUnlock(); @@ -6778,7 +6778,7 @@ List> addPathCacheDirectives( getEditLog().logSync(); } if (isAuditEnabled() && isExternalInvocation()) { - logAuditEvent(success, "addPathCacheDirectives", null, null, null); + logAuditEvent(success, "addPathBasedCacheDirectives", null, null, null); } RetryCache.setState(retryCacheEntry, success, results); } @@ -6786,7 +6786,7 @@ List> addPathCacheDirectives( } @SuppressWarnings("unchecked") - List> removePathCacheEntries(List ids) throws IOException { + List> removePathBasedCacheEntries(List ids) throws IOException { CacheEntryWithPayload retryCacheEntry = RetryCache.waitForCompletion(retryCache, null); if (retryCacheEntry != null && retryCacheEntry.isSuccess()) { @@ -6802,15 +6802,15 @@ List> removePathCacheEntries(List ids) throws IOException { checkOperation(OperationCategory.WRITE); if (isInSafeMode()) { throw new SafeModeException( - "Cannot remove path cache directives", safeMode); + "Cannot remove PathBasedCache directives", safeMode); } results = cacheManager.removeEntries(ids, pc); - //getEditLog().logRemovePathCacheEntries(results); FIXME: HDFS-5119 + //getEditLog().logRemovePathBasedCacheEntries(results); FIXME: HDFS-5119 success = true; } finally { writeUnlock(); if (isAuditEnabled() && isExternalInvocation()) { - logAuditEvent(success, "removePathCacheEntries", null, null, null); + logAuditEvent(success, "removePathBasedCacheEntries", null, null, null); } RetryCache.setState(retryCacheEntry, success, results); } @@ -6818,22 +6818,22 @@ List> removePathCacheEntries(List ids) throws IOException { return results; } - BatchedListEntries listPathCacheEntries(long startId, - String pool) throws IOException { + BatchedListEntries listPathBasedCacheEntries(long startId, + String pool, String path) throws IOException { final FSPermissionChecker pc = isPermissionEnabled ? 
getPermissionChecker() : null; - BatchedListEntries results; + BatchedListEntries results; checkOperation(OperationCategory.READ); readLock(); boolean success = false; try { checkOperation(OperationCategory.READ); - results = cacheManager.listPathCacheEntries(startId, pool, pc); + results = cacheManager.listPathBasedCacheEntries(startId, pool, path, pc); success = true; } finally { readUnlock(); if (isAuditEnabled() && isExternalInvocation()) { - logAuditEvent(success, "listPathCacheEntries", null, null, null); + logAuditEvent(success, "listPathBasedCacheEntries", null, null, null); } } return results; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index bcaefd4df07..b7cd4e14dc5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -62,8 +62,8 @@ import org.apache.hadoop.hdfs.HDFSPolicyProvider; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; -import org.apache.hadoop.hdfs.protocol.PathCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathCacheEntry; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DatanodeID; @@ -1211,43 +1211,47 @@ public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, } @Override - public List> addPathCacheDirectives( - List paths) throws IOException { - return namesystem.addPathCacheDirectives(paths); + public List> addPathBasedCacheDirectives( + List paths) throws IOException { + return namesystem.addPathBasedCacheDirectives(paths); } @Override - public List> removePathCacheEntries(List ids) + public List> removePathBasedCacheEntries(List ids) throws IOException { - return namesystem.removePathCacheEntries(ids); + return namesystem.removePathBasedCacheEntries(ids); } - private class ServerSidePathCacheEntriesIterator - extends BatchedRemoteIterator { + private class ServerSidePathBasedCacheEntriesIterator + extends BatchedRemoteIterator { private final String pool; - public ServerSidePathCacheEntriesIterator(Long firstKey, String pool) { + private final String path; + + public ServerSidePathBasedCacheEntriesIterator(Long firstKey, String pool, + String path) { super(firstKey); this.pool = pool; + this.path = path; } @Override - public BatchedEntries makeRequest( + public BatchedEntries makeRequest( Long nextKey) throws IOException { - return namesystem.listPathCacheEntries(nextKey, pool); + return namesystem.listPathBasedCacheEntries(nextKey, pool, path); } @Override - public Long elementToPrevKey(PathCacheEntry entry) { + public Long elementToPrevKey(PathBasedCacheEntry entry) { return entry.getEntryId(); } } @Override - public RemoteIterator listPathCacheEntries(long prevId, - String pool) throws IOException { - return new ServerSidePathCacheEntriesIterator(prevId, pool); + public RemoteIterator listPathBasedCacheEntries(long prevId, + String pool, String path) throws IOException { + return new ServerSidePathBasedCacheEntriesIterator(prevId, pool, path); } @Override diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java new file mode 100644 index 00000000000..a989f5ff76a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java @@ -0,0 +1,333 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.tools; + +import java.io.IOException; +import java.util.LinkedList; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; +import org.apache.hadoop.hdfs.tools.TableListing.Justification; +import org.apache.hadoop.util.Fallible; +import org.apache.hadoop.util.StringUtils; + +/** + * This class implements command-line operations on the HDFS Cache. 
+ */ +@InterfaceAudience.Private +public class CacheAdmin { + private static Configuration conf = new Configuration(); + + private static DistributedFileSystem getDFS() throws IOException { + FileSystem fs = FileSystem.get(conf); + if (!(fs instanceof DistributedFileSystem)) { + throw new IllegalArgumentException("FileSystem " + fs.getUri() + + " is not an HDFS file system"); + } + return (DistributedFileSystem)fs; + } + + interface Command { + String getName(); + String getShortUsage(); + String getLongUsage(); + int run(List args) throws IOException; + } + + private static class AddPathBasedCacheDirectiveCommand implements Command { + @Override + public String getName() { + return "-addPath"; + } + + @Override + public String getShortUsage() { + return "[-addPath -path -pool ]\n"; + } + + @Override + public String getLongUsage() { + return getShortUsage() + + "Adds a new PathBasedCache directive.\n" + + " The new path to cache.\n" + + " Paths may be either directories or files.\n" + + " The pool which this directive will reside in.\n" + + " You must have write permission on the cache pool in order\n" + + " to add new entries to it.\n"; + } + + @Override + public int run(List args) throws IOException { + String path = StringUtils.popOptionWithArgument("-path", args); + if (path == null) { + System.err.println("You must specify a path with -path."); + return 1; + } + String poolName = StringUtils.popOptionWithArgument("-pool", args); + if (poolName == null) { + System.err.println("You must specify a pool name with -pool."); + return 1; + } + if (!args.isEmpty()) { + System.err.println("Can't understand argument: " + args.get(0)); + return 1; + } + + DistributedFileSystem dfs = getDFS(); + List directives = + new LinkedList(); + PathBasedCacheDirective directive = new PathBasedCacheDirective(path, poolName); + directives.add(directive); + List> results = + dfs.addPathBasedCacheDirective(directives); + try { + PathBasedCacheEntry entry = results.get(0).get(); + System.out.println("Added PathBasedCache entry " + entry.getEntryId()); + return 0; + } catch (IOException e) { + System.err.println("Error adding cache directive " + directive + ": " + + e.getMessage()); + return 1; + } + } + } + + private static class RemovePathBasedCacheDirectiveCommand implements Command { + @Override + public String getName() { + return "-removePath"; + } + + @Override + public String getShortUsage() { + return "[-removePath ]\n"; + } + + @Override + public String getLongUsage() { + return getShortUsage() + + "Remove a cache directive.\n" + + " The id of the cache directive to remove.\n" + + " You must have write permission on the pool where the\n" + + " directive resides in order to remove it. 
To see a list\n" + + " of PathBasedCache directive IDs, use the -list command.\n"; + } + + @Override + public int run(List args) throws IOException { + String idString= StringUtils.popFirstNonOption(args); + if (idString == null) { + System.err.println("You must specify a directive ID to remove."); + return 1; + } + long id = Long.valueOf(idString); + if (id <= 0) { + System.err.println("Invalid directive ID " + id + ": ids must " + + "be greater than 0."); + return 1; + } + if (!args.isEmpty()) { + System.err.println("Can't understand argument: " + args.get(0)); + return 1; + } + DistributedFileSystem dfs = getDFS(); + List ids = new LinkedList(); + ids.add(id); + List> results = dfs.removePathBasedCacheEntries(ids); + try { + Long resultId = results.get(0).get(); + System.out.println("Removed PathBasedCache entry " + resultId); + return 0; + } catch (IOException e) { + System.err.println("Error removing cache directive " + id + ": " + + e.getMessage()); + return 1; + } + } + } + + private static class ListPathBasedCacheDirectiveCommand implements Command { + @Override + public String getName() { + return "-listPaths"; + } + + @Override + public String getShortUsage() { + return "[-listPaths [-path ] [-pool ]]\n"; + } + + @Override + public String getLongUsage() { + return getShortUsage() + + "List PathBasedCache directives.\n" + + " If a -path argument is given, we will list only\n" + + " PathBasedCache entries with this path.\n" + + " Note that if there is a PathBasedCache directive for \n" + + " in a cache pool that we don't have read access for, it\n" + + " not be listed. If there are unreadable cache pools, a\n" + + " message will be printed.\n" + + " may be incomplete.\n" + + " If a -pool argument is given, we will list only path\n" + + " cache entries in that pool.\n"; + } + + @Override + public int run(List args) throws IOException { + String pathFilter = StringUtils.popOptionWithArgument("-path", args); + String poolFilter = StringUtils.popOptionWithArgument("-pool", args); + if (!args.isEmpty()) { + System.err.println("Can't understand argument: " + args.get(0)); + return 1; + } + TableListing tableListing = new TableListing.Builder(). + addField("ID", Justification.RIGHT). + addField("POOL", Justification.LEFT). + addField("PATH", Justification.LEFT). + build(); + DistributedFileSystem dfs = getDFS(); + RemoteIterator iter = + dfs.listPathBasedCacheEntries(poolFilter, pathFilter); + int numEntries = 0; + while (iter.hasNext()) { + PathBasedCacheEntry entry = iter.next(); + String row[] = new String[] { + "" + entry.getEntryId(), + entry.getDirective().getPool(), + entry.getDirective().getPath(), + }; + tableListing.addRow(row); + numEntries++; + } + System.out.print(String.format("Found %d entr%s\n", + numEntries, numEntries == 1 ? "y" : "ies")); + if (numEntries > 0) { + System.out.print(tableListing.build()); + } + return 0; + } + } + + private static class HelpCommand implements Command { + @Override + public String getName() { + return "-help"; + } + + @Override + public String getShortUsage() { + return "[-help ]\n"; + } + + @Override + public String getLongUsage() { + return getShortUsage() + + "Get detailed help about a command.\n" + + " The command to get detailed help for. 
If no " + + " command-name is specified, we will print detailed help " + + " about all commands"; + } + + @Override + public int run(List args) throws IOException { + if (args.size() == 0) { + for (Command command : COMMANDS) { + System.err.println(command.getLongUsage()); + } + return 0; + } + if (args.size() != 1) { + System.out.println("You must give exactly one argument to -help."); + return 0; + } + String commandName = args.get(0); + commandName.replaceAll("^[-]*", ""); + Command command = determineCommand(commandName); + if (command == null) { + System.err.print("Sorry, I don't know the command '" + + commandName + "'.\n"); + System.err.print("Valid command names are:\n"); + String separator = ""; + for (Command c : COMMANDS) { + System.err.print(separator + c.getName()); + separator = ", "; + } + return 1; + } + System.err.print(command.getLongUsage()); + return 0; + } + } + + private static Command[] COMMANDS = { + new AddPathBasedCacheDirectiveCommand(), + new RemovePathBasedCacheDirectiveCommand(), + new ListPathBasedCacheDirectiveCommand(), + new HelpCommand(), + }; + + private static void printUsage(boolean longUsage) { + System.err.println( + "Usage: bin/hdfs cacheadmin [COMMAND]"); + for (Command command : COMMANDS) { + if (longUsage) { + System.err.print(command.getLongUsage()); + } else { + System.err.print(" " + command.getShortUsage()); + } + } + System.err.println(); + } + + private static Command determineCommand(String commandName) { + for (int i = 0; i < COMMANDS.length; i++) { + if (COMMANDS[i].getName().equals(commandName)) { + return COMMANDS[i]; + } + } + return null; + } + + public static void main(String[] argsArray) throws IOException { + if (argsArray.length == 0) { + printUsage(false); + System.exit(1); + } + Command command = determineCommand(argsArray[0]); + if (command == null) { + System.err.println("Can't understand command '" + argsArray[0] + "'"); + if (!argsArray[0].startsWith("-")) { + System.err.println("Command names must start with dashes."); + } + printUsage(false); + System.exit(1); + } + List args = new LinkedList(); + for (int j = 1; j < argsArray.length; j++) { + args.add(argsArray[j]); + } + System.exit(command.run(args)); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index 912569a9c1c..e4b3b8cb056 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.server.namenode.CachePool; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.TransferFsImage; +import org.apache.hadoop.hdfs.tools.TableListing.Justification; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.net.NetUtils; @@ -634,15 +635,6 @@ public int removeCachePool(String argsArray[], int idx) throws IOException { final private static String LIST_CACHE_POOLS_USAGE = "-listCachePools] [-verbose] [name]"; - private void listCachePool(CachePoolInfo info) { - System.out.print(String.format("%s\n", info.getPoolName())); - System.out.print(String.format("owner:\t%s\n", info.getOwnerName())); - System.out.print(String.format("group:\t%s\n", info.getGroupName())); - System.out.print(String.format("mode:\t%s\n", info.getMode())); - 
System.out.print(String.format("weight:\t%d\n", info.getWeight())); - System.out.print("\n"); - } - public int listCachePools(String argsArray[], int idx) throws IOException { List args = new LinkedList(); for (int i = idx; i < argsArray.length; i++) { @@ -655,39 +647,44 @@ public int listCachePools(String argsArray[], int idx) throws IOException { System.err.println("usage is " + LIST_CACHE_POOLS_USAGE); return 1; } - boolean gotResults = false; DistributedFileSystem dfs = getDFS(); + TableListing listing = new TableListing.Builder(). + addField("NAME", Justification.LEFT). + addField("OWNER", Justification.LEFT). + addField("GROUP", Justification.LEFT). + addField("MODE", Justification.LEFT). + addField("WEIGHT", Justification.RIGHT). + build(); + int numResults = 0; try { RemoteIterator iter = dfs.listCachePools(); - if (name != null) { - while (iter.hasNext()) { - CachePoolInfo info = iter.next(); - if (info.getPoolName().equals(name)) { - listCachePool(info); - gotResults = true; - return 0; + while (iter.hasNext()) { + CachePoolInfo info = iter.next(); + if (name == null || info.getPoolName().equals(name)) { + listing.addRow(new String[] { + info.getPoolName(), + info.getOwnerName(), + info.getGroupName(), + info.getMode().toString(), + info.getWeight().toString(), + }); + ++numResults; + if (name != null) { + break; } } - } else { - while (iter.hasNext()) { - listCachePool(iter.next()); - gotResults = true; - } } } catch (IOException e) { throw new RemoteException(e.getClass().getName(), e.getMessage()); } - int ret = 0; - if (!gotResults) { - if (name != null) { - System.out.println("No cache pool named " + name + " found."); - ret = 1; - } else { - System.out.println("No cache pools found."); - ret = 1; - } + System.out.print(String.format("Found %d result%s.\n", numResults, + (numResults == 1 ? "" : "s"))); + if (numResults > 0) { + System.out.print(listing.build()); } - return ret; + // If there are no results, we return 1 (failure exit code); + // otherwise we return 0 (success exit code). + return (numResults == 0) ? 1 : 0; } public int rollEdits() throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java new file mode 100644 index 00000000000..aded360a428 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java @@ -0,0 +1,137 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.tools; + +import java.util.LinkedList; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * This class implements a "table listing" with column headers. + */ +@InterfaceAudience.Private +public class TableListing { + public enum Justification { + LEFT, + RIGHT; + } + + private static class Column { + private final LinkedList rows; + private final Justification justification; + private int maxLength; + + Column(String title, Justification justification) { + this.rows = new LinkedList(); + this.justification = justification; + this.maxLength = 0; + addRow(title); + } + + private void addRow(String val) { + if ((val.length() + 1) > maxLength) { + maxLength = val.length() + 1; + } + rows.add(val); + } + + String getRow(int i) { + String raw = rows.get(i); + int paddingLength = maxLength - raw.length(); + String padding = (paddingLength <= 0) ? "" : + StringUtils.repeat(" ", paddingLength); + if (justification == Justification.LEFT) { + return raw + padding; + } else { + return padding + raw; + } + } + } + + public static class Builder { + private final LinkedList columns = new LinkedList(); + + /** + * Create a new Builder. + */ + public Builder() { + } + + /** + * Add a new field to the Table under construction. + * + * @param title Field title. + * @param leftJustified Whether or not the field is left justified. + * @return this. + */ + public Builder addField(String title, Justification justification) { + columns.add(new Column(title, justification)); + return this; + } + + /** + * Create a new TableListing. + */ + public TableListing build() { + return new TableListing(columns.toArray(new Column[0])); + } + } + + private final Column columns[]; + + private int numRows; + + TableListing(Column columns[]) { + this.columns = columns; + this.numRows = 0; + } + + /** + * Add a new row. + * + * @param row The row of objects to add-- one per column. + */ + public void addRow(String row[]) { + if (row.length != columns.length) { + throw new RuntimeException("trying to add a row with " + row.length + + " columns, but we have " + columns.length + " columns."); + } + for (int i = 0; i < columns.length; i++) { + columns[i].addRow(row[i]); + } + numRows++; + } + + /** + * Convert the table to a string. 
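(Illustrative aside, not part of the patch: a minimal sketch of how the new TableListing helper is used, based on the Builder/addField/addRow/build calls that CacheAdmin and DFSAdmin make elsewhere in this change. The column titles and row values below are invented sample data.)

import org.apache.hadoop.hdfs.tools.TableListing;
import org.apache.hadoop.hdfs.tools.TableListing.Justification;

public class TableListingSketch {
  public static void main(String[] args) {
    // Declare the columns up front; each addField() call adds one column
    // with its own justification.
    TableListing listing = new TableListing.Builder().
        addField("ID", Justification.RIGHT).
        addField("POOL", Justification.LEFT).
        build();
    // Each row must supply exactly one String per column.
    listing.addRow(new String[] { "1", "pool1" });
    listing.addRow(new String[] { "42", "pool2" });
    // build() renders the header row followed by the data rows, padding
    // every column to its widest value.
    System.out.print(listing.build());
  }
}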
+ */ + public String build() { + StringBuilder builder = new StringBuilder(); + for (int i = 0; i < numRows + 1; i++) { + String prefix = ""; + for (int j = 0; j < columns.length; j++) { + builder.append(prefix); + prefix = " "; + builder.append(columns[j].getRow(i)); + } + builder.append("\n"); + } + return builder.toString(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index c097c3b696a..f2f4c5e7b96 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -363,54 +363,56 @@ message IsFileClosedResponseProto { required bool result = 1; } -message PathCacheDirectiveProto { +message PathBasedCacheDirectiveProto { required string path = 1; required string pool = 2; } -message AddPathCacheDirectivesRequestProto { - repeated PathCacheDirectiveProto elements = 1; +message AddPathBasedCacheDirectivesRequestProto { + repeated PathBasedCacheDirectiveProto elements = 1; } -message AddPathCacheDirectivesResponseProto { +message AddPathBasedCacheDirectivesResponseProto { repeated int64 results = 1 [packed=true]; } -enum AddPathCacheDirectiveErrorProto { - EMPTY_PATH_ERROR = -1; - INVALID_PATH_NAME_ERROR = -2; - INVALID_POOL_NAME_ERROR = -3; - UNEXPECTED_ADD_ERROR = -4; +enum AddPathBasedCacheDirectiveErrorProto { + UNEXPECTED_ADD_ERROR = -1; + EMPTY_PATH_ERROR = -2; + INVALID_PATH_NAME_ERROR = -3; + INVALID_POOL_NAME_ERROR = -4; + ADD_PERMISSION_DENIED_ERROR = -5; } -message RemovePathCacheEntriesRequestProto { +message RemovePathBasedCacheEntriesRequestProto { repeated int64 elements = 1 [packed=true]; } -message RemovePathCacheEntriesResponseProto { +message RemovePathBasedCacheEntriesResponseProto { repeated int64 results = 1 [packed=true]; } -enum RemovePathCacheEntryErrorProto { - INVALID_CACHED_PATH_ID_ERROR = -1; - NO_SUCH_CACHED_PATH_ID_ERROR = -2; - REMOVE_PERMISSION_DENIED_ERROR = -3; - UNEXPECTED_REMOVE_ERROR = -4; +enum RemovePathBasedCacheEntryErrorProto { + UNEXPECTED_REMOVE_ERROR = -1; + INVALID_CACHED_PATH_ID_ERROR = -2; + NO_SUCH_CACHED_PATH_ID_ERROR = -3; + REMOVE_PERMISSION_DENIED_ERROR = -4; } -message ListPathCacheEntriesRequestProto { +message ListPathBasedCacheEntriesRequestProto { required int64 prevId = 1; - required string pool = 2; + optional string pool = 2; + optional string path = 3; } -message ListPathCacheEntriesElementProto { +message ListPathBasedCacheEntriesElementProto { required int64 id = 1; - required string path = 2; - required string pool = 3; + required string pool = 2; + required string path = 3; } -message ListPathCacheEntriesResponseProto { - repeated ListPathCacheEntriesElementProto elements = 1; +message ListPathBasedCacheEntriesResponseProto { + repeated ListPathBasedCacheEntriesElementProto elements = 1; required bool hasMore = 2; } @@ -449,7 +451,7 @@ message ListCachePoolsRequestProto { message ListCachePoolsResponseProto { repeated ListCachePoolsResponseElementProto elements = 1; - optional bool hasMore = 2; + required bool hasMore = 2; } message ListCachePoolsResponseElementProto { @@ -641,12 +643,12 @@ service ClientNamenodeProtocol { returns(ListCorruptFileBlocksResponseProto); rpc metaSave(MetaSaveRequestProto) returns(MetaSaveResponseProto); rpc getFileInfo(GetFileInfoRequestProto) returns(GetFileInfoResponseProto); - rpc addPathCacheDirectives(AddPathCacheDirectivesRequestProto) - returns 
(AddPathCacheDirectivesResponseProto); - rpc removePathCacheEntries(RemovePathCacheEntriesRequestProto) - returns (RemovePathCacheEntriesResponseProto); - rpc listPathCacheEntries(ListPathCacheEntriesRequestProto) - returns (ListPathCacheEntriesResponseProto); + rpc addPathBasedCacheDirectives(AddPathBasedCacheDirectivesRequestProto) + returns (AddPathBasedCacheDirectivesResponseProto); + rpc removePathBasedCacheEntries(RemovePathBasedCacheEntriesRequestProto) + returns (RemovePathBasedCacheEntriesResponseProto); + rpc listPathBasedCacheEntries(ListPathBasedCacheEntriesRequestProto) + returns (ListPathBasedCacheEntriesResponseProto); rpc addCachePool(AddCachePoolRequestProto) returns(AddCachePoolResponseProto); rpc modifyCachePool(ModifyCachePoolRequestProto) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathCacheRequests.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java similarity index 70% rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathCacheRequests.java rename to hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java index ddf8e169708..bf06eeb3ba6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathCacheRequests.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java @@ -34,23 +34,23 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError; -import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.PoolWritePermissionDeniedError; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.EmptyPathError; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPathNameError; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PoolWritePermissionDeniedError; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; -import org.apache.hadoop.hdfs.protocol.PathCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathCacheEntry; -import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.InvalidIdException; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.NoSuchIdException; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Fallible; import org.junit.Test; -public class TestPathCacheRequests { - static final Log LOG = LogFactory.getLog(TestPathCacheRequests.class); +public class TestPathBasedCacheRequests { + 
static final Log LOG = LogFactory.getLog(TestPathBasedCacheRequests.class); private static final UserGroupInformation unprivilegedUser = UserGroupInformation.createRemoteUser("unprivilegedUser"); @@ -101,11 +101,16 @@ public void testCreateAndModifyPools() throws Exception { proto.addCachePool(new CachePoolInfo("pool1"). setOwnerName("abc").setGroupName("123"). setMode(new FsPermission((short)0755)).setWeight(150)); - proto.modifyCachePool(new CachePoolInfo("pool1"). - setOwnerName("def").setGroupName("456")); RemoteIterator iter = proto.listCachePools(""); CachePoolInfo info = iter.next(); assertEquals("pool1", info.getPoolName()); + assertEquals("abc", info.getOwnerName()); + assertEquals("123", info.getGroupName()); + proto.modifyCachePool(new CachePoolInfo("pool1"). + setOwnerName("def").setGroupName("456")); + iter = proto.listCachePools(""); + info = iter.next(); + assertEquals("pool1", info.getPoolName()); assertEquals("def", info.getOwnerName()); assertEquals("456", info.getGroupName()); assertEquals(new FsPermission((short)0755), info.getMode()); @@ -127,16 +132,16 @@ public void testCreateAndModifyPools() throws Exception { } private static void validateListAll( - RemoteIterator iter, + RemoteIterator iter, long id0, long id1, long id2) throws Exception { - Assert.assertEquals(new PathCacheEntry(id0, - new PathCacheDirective("/alpha", "pool1")), + Assert.assertEquals(new PathBasedCacheEntry(id0, + new PathBasedCacheDirective("/alpha", "pool1")), iter.next()); - Assert.assertEquals(new PathCacheEntry(id1, - new PathCacheDirective("/beta", "pool2")), + Assert.assertEquals(new PathBasedCacheEntry(id1, + new PathBasedCacheDirective("/beta", "pool2")), iter.next()); - Assert.assertEquals(new PathCacheEntry(id2, - new PathCacheDirective("/gamma", "pool1")), + Assert.assertEquals(new PathBasedCacheEntry(id2, + new PathBasedCacheDirective("/gamma", "pool1")), iter.next()); Assert.assertFalse(iter.hasNext()); } @@ -159,18 +164,19 @@ public void testSetAndGet() throws Exception { proto.addCachePool(new CachePoolInfo("pool4"). 
setMode(new FsPermission((short)0))); - List> addResults1 = + List> addResults1 = unprivilegedUser.doAs(new PrivilegedExceptionAction< - List>>() { + List>>() { @Override - public List> run() throws IOException { - return proto.addPathCacheDirectives(Arrays.asList( - new PathCacheDirective[] { - new PathCacheDirective("/alpha", "pool1"), - new PathCacheDirective("/beta", "pool2"), - new PathCacheDirective("", "pool3"), - new PathCacheDirective("/zeta", "nonexistent_pool"), - new PathCacheDirective("/zeta", "pool4") + public List> run() throws IOException { + return proto.addPathBasedCacheDirectives(Arrays.asList( + new PathBasedCacheDirective[] { + new PathBasedCacheDirective("/alpha", "pool1"), + new PathBasedCacheDirective("/beta", "pool2"), + new PathBasedCacheDirective("", "pool3"), + new PathBasedCacheDirective("/zeta", "nonexistent_pool"), + new PathBasedCacheDirective("/zeta", "pool4"), + new PathBasedCacheDirective("//illegal/path/", "pool1") })); } }); @@ -197,28 +203,36 @@ public List> run() throws IOException { Assert.assertTrue(ioe.getCause() instanceof PoolWritePermissionDeniedError); } + try { + addResults1.get(5).get(); + Assert.fail("expected an error when adding a malformed path " + + "to the cache directives."); + } catch (IOException ioe) { + //Assert.assertTrue(ioe.getCause() + //instanceof PoolWritePermissionDeniedError); + } - List> addResults2 = - proto.addPathCacheDirectives(Arrays.asList( - new PathCacheDirective[] { - new PathCacheDirective("/alpha", "pool1"), - new PathCacheDirective("/theta", ""), - new PathCacheDirective("bogus", "pool1"), - new PathCacheDirective("/gamma", "pool1") + List> addResults2 = + proto.addPathBasedCacheDirectives(Arrays.asList( + new PathBasedCacheDirective[] { + new PathBasedCacheDirective("/alpha", "pool1"), + new PathBasedCacheDirective("/theta", ""), + new PathBasedCacheDirective("bogus", "pool1"), + new PathBasedCacheDirective("/gamma", "pool1") })); long id = addResults2.get(0).get().getEntryId(); Assert.assertEquals("expected to get back the same ID as last time " + - "when re-adding an existing path cache directive.", ids1[0], id); + "when re-adding an existing PathBasedCache directive.", ids1[0], id); try { addResults2.get(1).get(); - Assert.fail("expected an error when adding a path cache " + + Assert.fail("expected an error when adding a PathBasedCache " + "directive with an empty pool name."); } catch (IOException ioe) { Assert.assertTrue(ioe.getCause() instanceof InvalidPoolNameError); } try { addResults2.get(2).get(); - Assert.fail("expected an error when adding a path cache " + + Assert.fail("expected an error when adding a PathBasedCache " + "directive with a non-absolute path name."); } catch (IOException ioe) { Assert.assertTrue(ioe.getCause() instanceof InvalidPathNameError); @@ -226,20 +240,20 @@ public List> run() throws IOException { long ids2[] = new long[1]; ids2[0] = addResults2.get(3).get().getEntryId(); - RemoteIterator iter = - proto.listPathCacheEntries(0, ""); + RemoteIterator iter = + proto.listPathBasedCacheEntries(0, null, null); validateListAll(iter, ids1[0], ids1[1], ids2[0]); - iter = proto.listPathCacheEntries(0, ""); + iter = proto.listPathBasedCacheEntries(0, null, null); validateListAll(iter, ids1[0], ids1[1], ids2[0]); - iter = proto.listPathCacheEntries(0, "pool3"); + iter = proto.listPathBasedCacheEntries(0, "pool3", null); Assert.assertFalse(iter.hasNext()); - iter = proto.listPathCacheEntries(0, "pool2"); + iter = proto.listPathBasedCacheEntries(0, "pool2", null); 
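// (Illustrative aside, not part of the patch or this test: the new third
// argument to listPathBasedCacheEntries allows filtering by path as well as
// by pool, with null meaning "no filter", as the calls above show. The helper
// below is hypothetical and assumes the same imports as this test class.)
static void printEntriesForPath(NamenodeProtocols proto) throws IOException {
  // List only the directives whose path is exactly "/alpha", in any pool
  // the caller has READ access to.
  RemoteIterator<PathBasedCacheEntry> it =
      proto.listPathBasedCacheEntries(0, null, "/alpha");
  while (it.hasNext()) {
    PathBasedCacheEntry entry = it.next();
    System.out.println(entry.getEntryId() + "\t" +
        entry.getDirective().getPool() + "\t" +
        entry.getDirective().getPath());
  }
}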
Assert.assertEquals(addResults1.get(1).get(), iter.next()); Assert.assertFalse(iter.hasNext()); List> removeResults1 = - proto.removePathCacheEntries(Arrays.asList( + proto.removePathBasedCacheEntries(Arrays.asList( new Long[] { ids1[1], -42L, 999999L })); Assert.assertEquals(Long.valueOf(ids1[1]), removeResults1.get(0).get()); @@ -255,7 +269,7 @@ public List> run() throws IOException { } catch (IOException ioe) { Assert.assertTrue(ioe.getCause() instanceof NoSuchIdException); } - iter = proto.listPathCacheEntries(0, "pool2"); + iter = proto.listPathBasedCacheEntries(0, "pool2", null); Assert.assertFalse(iter.hasNext()); } finally { if (cluster != null) { cluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml index bd248bc88b9..fd7205aad68 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml @@ -16385,7 +16385,7 @@ SubstringComparator - No cache pools found. + Found 0 results. @@ -16434,7 +16434,7 @@ SubstringComparator - foo + bob bob rw-rw-r-- 100 From 2f8297215fff6f30739f266e4f64713adb95d0c2 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Fri, 13 Sep 2013 20:36:11 +0000 Subject: [PATCH 16/51] HDFS-5198. NameNodeRpcServer must not send back DNA_FINALIZE in reply to a cache report. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1523087 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 +++ .../hadoop/hdfs/server/namenode/NameNodeRpcServer.java | 5 ----- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index 4a31cd644d6..1b42824e18d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -39,3 +39,6 @@ HDFS-4949 (Unreleased) BUG FIXES HDFS-5169. hdfs.c: translateZCRException: null pointer deref when translating some exceptions. (Contributed by Colin Patrick McCabe) + + HDFS-5198. NameNodeRpcServer must not send back DNA_FINALIZE in reply to a + cache report. 
(Contributed by Colin Patrick McCabe) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index b7cd4e14dc5..a78befb82f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -31,13 +31,11 @@ import java.util.Collection; import java.util.HashMap; import java.util.List; -import java.util.NoSuchElementException; import org.apache.commons.logging.Log; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BatchedRemoteIterator; -import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.CreateFlag; @@ -971,9 +969,6 @@ public DatanodeCommand cacheReport(DatanodeRegistration nodeReg, verifyRequest(nodeReg); BlockListAsLongs blist = new BlockListAsLongs(blocks); namesystem.getBlockManager().processCacheReport(nodeReg, poolId, blist); - if (nn.getFSImage().isUpgradeFinalized() && !nn.isStandbyState()) { - return new FinalizeCommand(poolId); - } return null; } From 1a1f49fa4f1696712b921e7b499d602292e10279 Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Fri, 13 Sep 2013 20:47:15 +0000 Subject: [PATCH 17/51] HDFS-5195. Prevent passing null pointer to mlock and munlock. Contributed by Chris Nauroth. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1523093 13f79535-47bb-0310-9956-ffa450edef68 --- .../src/org/apache/hadoop/io/nativeio/NativeIO.c | 11 +++++++++++ .../org/apache/hadoop/io/nativeio/TestNativeIO.java | 9 ++++++--- hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt | 2 ++ 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c index afa4720e507..56f0f71eb5a 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c @@ -363,6 +363,15 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_sync_1file_1range( #endif } +#define CHECK_DIRECT_BUFFER_ADDRESS(buf) \ + { \ + if (!buf) { \ + THROW(env, "java/lang/UnsupportedOperationException", \ + "JNI access to direct buffers not available"); \ + return; \ + } \ + } + /** * public static native void mlock_native( * ByteBuffer buffer, long offset); @@ -379,6 +388,7 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_mlock_1native( PASS_EXCEPTIONS(env); if (mlock(buf, len)) { + CHECK_DIRECT_BUFFER_ADDRESS(buf); throw_ioe(env, errno); } } @@ -399,6 +409,7 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_munlock_1native( PASS_EXCEPTIONS(env); if (munlock(buf, len)) { + CHECK_DIRECT_BUFFER_ADDRESS(buf); throw_ioe(env, errno); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java index 69c963f2d75..917532e4bf8 100644 --- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java @@ -545,9 +545,12 @@ public void testMlock() throws Exception { bufSum += buf[i]; } FileOutputStream fos = new FileOutputStream(TEST_FILE); - fos.write(buf); - fos.getChannel().force(true); - fos.close(); + try { + fos.write(buf); + fos.getChannel().force(true); + } finally { + fos.close(); + } FileInputStream fis = null; FileChannel channel = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index 1b42824e18d..0505fa7dfcb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -42,3 +42,5 @@ HDFS-4949 (Unreleased) HDFS-5198. NameNodeRpcServer must not send back DNA_FINALIZE in reply to a cache report. (Contributed by Colin Patrick McCabe) + + HDFS-5195. Prevent passing null pointer to mlock and munlock. (cnauroth) From 40eb94ade3161d93e7a762a839004748f6d0ae89 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 13 Sep 2013 23:27:22 +0000 Subject: [PATCH 18/51] HDFS-5053. NameNode should invoke DataNode APIs to coordinate caching. (Andrew Wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1523145 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 2 + .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 + .../hadoop/hdfs/protocolPB/PBHelper.java | 16 + .../blockmanagement/BlockCollection.java | 11 + .../server/blockmanagement/BlockManager.java | 208 ++---- .../CacheReplicationManager.java | 595 ++++++++++++++++++ .../CacheReplicationMonitor.java | 302 +++++++++ .../CacheReplicationPolicy.java | 125 ++++ .../blockmanagement/DatanodeDescriptor.java | 138 +++- .../blockmanagement/DatanodeManager.java | 13 + .../blockmanagement/InvalidateBlocks.java | 75 +-- .../InvalidateStoredBlocks.java | 67 ++ .../PendingReplicationBlocks.java | 29 +- .../blockmanagement/ReportProcessor.java | 271 ++++++++ .../server/blockmanagement/UncacheBlocks.java | 44 ++ .../hdfs/server/datanode/BPOfferService.java | 2 + .../hdfs/server/datanode/BPServiceActor.java | 15 +- .../hadoop/hdfs/server/datanode/DNConf.java | 6 +- .../fsdataset/impl/FsDatasetCache.java | 12 +- .../datanode/metrics/DataNodeMetrics.java | 16 + .../hdfs/server/namenode/CacheManager.java | 71 ++- .../hdfs/server/namenode/FSDirectory.java | 46 ++ .../hdfs/server/namenode/FSNamesystem.java | 59 +- .../hdfs/server/namenode/INodeFile.java | 14 + .../server/namenode/NameNodeRpcServer.java | 9 +- .../namenode/metrics/NameNodeMetrics.java | 13 + .../TestCacheReplicationManager.java | 162 +++++ 27 files changed, 2079 insertions(+), 244 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationManager.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationPolicy.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateStoredBlocks.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReportProcessor.java create mode 100644 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UncacheBlocks.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index 0505fa7dfcb..27f8c10dfe8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -33,6 +33,8 @@ HDFS-4949 (Unreleased) HDFS-5158. Add command-line support for manipulating cache directives. (Contributed by Colin Patrick McCabe) + HDFS-5053. NameNode should invoke DataNode APIs to coordinate caching. + (Andrew Wang) OPTIMIZATIONS diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 78293c22d69..58d03ff26ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -102,6 +102,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final long DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT = 0; public static final String DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_KEY = "dfs.datanode.fsdatasetcache.max.threads.per.volume"; public static final int DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_DEFAULT = 4; + public static final String DFS_NAMENODE_CACHING_ENABLED_KEY = "dfs.namenode.caching.enabled"; + public static final boolean DFS_NAMENODE_CACHING_ENABLED_DEFAULT = false; public static final String DFS_NAMENODE_HTTP_PORT_KEY = "dfs.http.port"; public static final int DFS_NAMENODE_HTTP_PORT_DEFAULT = 50070; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 4051d01e031..e24cb0d332d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -713,6 +713,12 @@ public static BlockCommandProto convert(BlockCommand cmd) { case DatanodeProtocol.DNA_SHUTDOWN: builder.setAction(BlockCommandProto.Action.SHUTDOWN); break; + case DatanodeProtocol.DNA_CACHE: + builder.setAction(BlockCommandProto.Action.CACHE); + break; + case DatanodeProtocol.DNA_UNCACHE: + builder.setAction(BlockCommandProto.Action.UNCACHE); + break; default: throw new AssertionError("Invalid action"); } @@ -765,6 +771,8 @@ public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) { break; case DatanodeProtocol.DNA_TRANSFER: case DatanodeProtocol.DNA_INVALIDATE: + case DatanodeProtocol.DNA_CACHE: + case DatanodeProtocol.DNA_UNCACHE: case DatanodeProtocol.DNA_SHUTDOWN: builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).setBlkCmd( PBHelper.convert((BlockCommand) datanodeCommand)); @@ -818,6 +826,14 @@ public static BlockCommand convert(BlockCommandProto blkCmd) { case SHUTDOWN: action = DatanodeProtocol.DNA_SHUTDOWN; break; + case CACHE: + action = DatanodeProtocol.DNA_CACHE; + break; + case UNCACHE: + action = DatanodeProtocol.DNA_UNCACHE; + break; + default: + throw new AssertionError("Unknown action type: " + blkCmd.getAction()); } return new 
BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java index f344833a0c9..ba970e07f28 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java @@ -60,6 +60,17 @@ public interface BlockCollection { */ public short getBlockReplication(); + /** + * Set cache replication factor for the collection + */ + public void setCacheReplication(short cacheReplication); + + /** + * Get cache replication factor for the collection + * @return cache replication value + */ + public short getCacheReplication(); + /** * Get the name of the collection. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 65dc7fe49ad..ac121a109c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -77,14 +77,13 @@ import org.apache.hadoop.util.Time; import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; import com.google.common.collect.Sets; /** * Keeps information related to the blocks stored in the Hadoop cluster. */ @InterfaceAudience.Private -public class BlockManager { +public class BlockManager extends ReportProcessor { static final Log LOG = LogFactory.getLog(BlockManager.class); public static final Log blockLog = NameNode.blockStateChangeLog; @@ -163,7 +162,7 @@ public int getPendingDataNodeMessageCount() { final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap(); /** Blocks to be invalidated. */ - private final InvalidateBlocks invalidateBlocks; + private final InvalidateStoredBlocks invalidateBlocks; /** * After a failover, over-replicated blocks may not be handled @@ -219,7 +218,6 @@ public int getPendingDataNodeMessageCount() { final boolean encryptDataTransfer; // Max number of blocks to log info about during a block report. 
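// (Illustrative aside, not part of the patch: a sketch of how the new cache
// actions travel through the protobuf translation layer added to PBHelper
// above. Imports are omitted and the block id and pool name are made up; the
// calls and constants are the ones that appear in the PBHelper hunk.)
static void cacheCommandRoundTrip() {
  Block[] blocks = new Block[] { new Block(1L) };
  DatanodeInfo[][] targets = new DatanodeInfo[][] { new DatanodeInfo[0] };
  // NameNode side: a DNA_CACHE command destined for a DataNode.
  BlockCommand cmd = new BlockCommand(DatanodeProtocol.DNA_CACHE,
      "sample-block-pool", blocks, targets);
  // Java -> protobuf: DNA_CACHE maps to BlockCommandProto.Action.CACHE.
  BlockCommandProto proto = PBHelper.convert(cmd);
  // protobuf -> Java: Action.CACHE maps back to DNA_CACHE; unknown actions
  // now raise an AssertionError instead of falling through silently.
  BlockCommand decoded = PBHelper.convert(proto);
  assert decoded.getAction() == DatanodeProtocol.DNA_CACHE;
}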
- private final long maxNumBlocksToLog; /** * When running inside a Standby node, the node may receive block reports @@ -237,10 +235,11 @@ public int getPendingDataNodeMessageCount() { public BlockManager(final Namesystem namesystem, final FSClusterStats stats, final Configuration conf) throws IOException { + super(conf); this.namesystem = namesystem; datanodeManager = new DatanodeManager(this, namesystem, conf); heartbeatManager = datanodeManager.getHeartbeatManager(); - invalidateBlocks = new InvalidateBlocks(datanodeManager); + invalidateBlocks = new InvalidateStoredBlocks(datanodeManager); // Compute the map capacity by allocating 2% of total memory blocksMap = new BlocksMap(DEFAULT_MAP_LOAD_FACTOR); @@ -300,11 +299,7 @@ public BlockManager(final Namesystem namesystem, final FSClusterStats stats, this.encryptDataTransfer = conf.getBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT); - - this.maxNumBlocksToLog = - conf.getLong(DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY, - DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT); - + LOG.info("defaultReplication = " + defaultReplication); LOG.info("maxReplication = " + maxReplication); LOG.info("minReplication = " + minReplication); @@ -1004,6 +999,7 @@ void removeBlocksAssociatedTo(final DatanodeDescriptor node) { * Adds block to list of blocks which will be invalidated on specified * datanode and log the operation */ + @Override // ReportProcessor void addToInvalidates(final Block block, final DatanodeInfo datanode) { invalidateBlocks.add(block, datanode, true); } @@ -1049,7 +1045,8 @@ public void findAndMarkBlockAsCorrupt(final ExtendedBlock blk, markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock, reason), dn); } - private void markBlockAsCorrupt(BlockToMarkCorrupt b, + @Override // ReportProcessor + void markBlockAsCorrupt(BlockToMarkCorrupt b, DatanodeInfo dn) throws IOException { DatanodeDescriptor node = getDatanodeManager().getDatanode(dn); if (node == null) { @@ -1059,7 +1056,7 @@ private void markBlockAsCorrupt(BlockToMarkCorrupt b, BlockCollection bc = b.corrupted.getBlockCollection(); if (bc == null) { - blockLog.info("BLOCK markBlockAsCorrupt: " + b + blockLogInfo("#markBlockAsCorrupt: " + b + " cannot be marked as corrupt as it does not belong to any file"); addToInvalidates(b.corrupted, node); return; @@ -1123,6 +1120,9 @@ public void setPostponeBlocksFromFuture(boolean postpone) { this.shouldPostponeBlocksFromFuture = postpone; } + public boolean shouldPostponeBlocksFromFuture() { + return this.shouldPostponeBlocksFromFuture; + } private void postponeBlock(Block blk) { if (postponedMisreplicatedBlocks.add(blk)) { @@ -1544,61 +1544,6 @@ private void processPendingReplications() { */ } } - - /** - * StatefulBlockInfo is used to build the "toUC" list, which is a list of - * updates to the information about under-construction blocks. - * Besides the block in question, it provides the ReplicaState - * reported by the datanode in the block report. - */ - private static class StatefulBlockInfo { - final BlockInfoUnderConstruction storedBlock; - final ReplicaState reportedState; - - StatefulBlockInfo(BlockInfoUnderConstruction storedBlock, - ReplicaState reportedState) { - this.storedBlock = storedBlock; - this.reportedState = reportedState; - } - } - - /** - * BlockToMarkCorrupt is used to build the "toCorrupt" list, which is a - * list of blocks that should be considered corrupt due to a block report. 
- */ - private static class BlockToMarkCorrupt { - /** The corrupted block in a datanode. */ - final BlockInfo corrupted; - /** The corresponding block stored in the BlockManager. */ - final BlockInfo stored; - /** The reason to mark corrupt. */ - final String reason; - - BlockToMarkCorrupt(BlockInfo corrupted, BlockInfo stored, String reason) { - Preconditions.checkNotNull(corrupted, "corrupted is null"); - Preconditions.checkNotNull(stored, "stored is null"); - - this.corrupted = corrupted; - this.stored = stored; - this.reason = reason; - } - - BlockToMarkCorrupt(BlockInfo stored, String reason) { - this(stored, stored, reason); - } - - BlockToMarkCorrupt(BlockInfo stored, long gs, String reason) { - this(new BlockInfo(stored), stored, reason); - //the corrupted block in datanode has a different generation stamp - corrupted.setGenerationStamp(gs); - } - - @Override - public String toString() { - return corrupted + "(" - + (corrupted == stored? "same as stored": "stored=" + stored) + ")"; - } - } /** * The given datanode is reporting all its blocks. @@ -1659,15 +1604,6 @@ public void processReport(final DatanodeID nodeID, final String poolId, + ", processing time: " + (endTime - startTime) + " msecs"); } - /** - * The given datanode is reporting all of its cached blocks. - * Update the cache state of blocks in the block map. - */ - public void processCacheReport(final DatanodeID nodeID, final String poolId, - final BlockListAsLongs newReport) throws IOException { - // TODO: Implement me! - } - /** * Rescan the list of blocks which were previously postponed. */ @@ -1699,46 +1635,6 @@ private void rescanPostponedMisreplicatedBlocks() { } } - private void processReport(final DatanodeDescriptor node, - final BlockListAsLongs report) throws IOException { - // Normal case: - // Modify the (block-->datanode) map, according to the difference - // between the old and new block report. - // - Collection toAdd = new LinkedList(); - Collection toRemove = new LinkedList(); - Collection toInvalidate = new LinkedList(); - Collection toCorrupt = new LinkedList(); - Collection toUC = new LinkedList(); - reportDiff(node, report, toAdd, toRemove, toInvalidate, toCorrupt, toUC); - - // Process the blocks on each queue - for (StatefulBlockInfo b : toUC) { - addStoredBlockUnderConstruction(b.storedBlock, node, b.reportedState); - } - for (Block b : toRemove) { - removeStoredBlock(b, node); - } - int numBlocksLogged = 0; - for (BlockInfo b : toAdd) { - addStoredBlock(b, node, null, numBlocksLogged < maxNumBlocksToLog); - numBlocksLogged++; - } - if (numBlocksLogged > maxNumBlocksToLog) { - blockLog.info("BLOCK* processReport: logged info for " + maxNumBlocksToLog - + " of " + numBlocksLogged + " reported."); - } - for (Block b : toInvalidate) { - blockLog.info("BLOCK* processReport: " - + b + " on " + node + " size " + b.getNumBytes() - + " does not belong to any file"); - addToInvalidates(b, node); - } - for (BlockToMarkCorrupt b : toCorrupt) { - markBlockAsCorrupt(b, node); - } - } - /** * processFirstBlockReport is intended only for processing "initial" block * reports, the first block report received from a DN after it registers. 
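(Illustrative aside, not part of the patch: an approximation of what the new ReportProcessor base class looks like, inferred from the "@Override // ReportProcessor" markers and from the fields and methods this hunk removes from BlockManager. The real ReportProcessor.java added by HDFS-5053 is not shown in this excerpt and may differ; the class name below is deliberately different to avoid confusion with it.)

package org.apache.hadoop.hdfs.server.blockmanagement;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

abstract class ReportProcessorSketch {
  /** Moved up from BlockManager so cache reports can share the same limit. */
  final long maxNumBlocksToLog;

  ReportProcessorSketch(Configuration conf) {
    this.maxNumBlocksToLog = conf.getLong(
        DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
        DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT);
  }

  // The shared report-diff loop (formerly BlockManager#processReport and
  // #reportDiff) lives in the base class and calls back into hooks like
  // these, which BlockManager overrides for stored replicas and the new
  // CacheReplicationManager overrides for cached replicas.
  abstract void addToInvalidates(Block block, DatanodeInfo datanode);

  abstract Block addStoredBlock(BlockInfo block, DatanodeDescriptor node,
      DatanodeDescriptor delNodeHint, boolean logEveryBlock)
      throws IOException;

  // Logging also goes through overridable hooks (blockLogInfo/blockLogWarn
  // in the diff) so each subclass can prefix its own log messages.
  abstract void blockLogInfo(String message);

  abstract void blockLogWarn(String message);
}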
@@ -1801,44 +1697,6 @@ private void processFirstBlockReport(final DatanodeDescriptor node, } } - private void reportDiff(DatanodeDescriptor dn, - BlockListAsLongs newReport, - Collection toAdd, // add to DatanodeDescriptor - Collection toRemove, // remove from DatanodeDescriptor - Collection toInvalidate, // should be removed from DN - Collection toCorrupt, // add to corrupt replicas list - Collection toUC) { // add to under-construction list - // place a delimiter in the list which separates blocks - // that have been reported from those that have not - BlockInfo delimiter = new BlockInfo(new Block(), 1); - boolean added = dn.addBlock(delimiter); - assert added : "Delimiting block cannot be present in the node"; - int headIndex = 0; //currently the delimiter is in the head of the list - int curIndex; - - if (newReport == null) - newReport = new BlockListAsLongs(); - // scan the report and process newly reported blocks - BlockReportIterator itBR = newReport.getBlockReportIterator(); - while(itBR.hasNext()) { - Block iblk = itBR.next(); - ReplicaState iState = itBR.getCurrentReplicaState(); - BlockInfo storedBlock = processReportedBlock(dn, iblk, iState, - toAdd, toInvalidate, toCorrupt, toUC); - // move block to the head of the list - if (storedBlock != null && (curIndex = storedBlock.findDatanode(dn)) >= 0) { - headIndex = dn.moveBlockToHead(storedBlock, curIndex, headIndex); - } - } - // collect blocks that have not been reported - // all of them are next to the delimiter - Iterator it = new DatanodeDescriptor.BlockIterator( - delimiter.getNext(0), dn); - while(it.hasNext()) - toRemove.add(it.next()); - dn.removeBlock(delimiter); - } - /** * Process a block replica reported by the data-node. * No side effects except adding to the passed-in Collections. @@ -1870,7 +1728,8 @@ private void reportDiff(DatanodeDescriptor dn, * @return the up-to-date stored block, if it should be kept. * Otherwise, null. */ - private BlockInfo processReportedBlock(final DatanodeDescriptor dn, + @Override // ReportProcessor + BlockInfo processReportedBlock(final DatanodeDescriptor dn, final Block block, final ReplicaState reportedState, final Collection toAdd, final Collection toInvalidate, @@ -2097,6 +1956,7 @@ private boolean isBlockUnderConstruction(BlockInfo storedBlock, } } + @Override // ReportProcessor void addStoredBlockUnderConstruction( BlockInfoUnderConstruction block, DatanodeDescriptor node, @@ -2152,7 +2012,8 @@ private void addStoredBlockImmediate(BlockInfo storedBlock, * needed replications if this takes care of the problem. * @return the block that is stored in blockMap. */ - private Block addStoredBlock(final BlockInfo block, + @Override // ReportProcessor + Block addStoredBlock(final BlockInfo block, DatanodeDescriptor node, DatanodeDescriptor delNodeHint, boolean logEveryBlock) @@ -2167,7 +2028,7 @@ private Block addStoredBlock(final BlockInfo block, } if (storedBlock == null || storedBlock.getBlockCollection() == null) { // If this block does not belong to anyfile, then we are done. - blockLog.info("BLOCK* addStoredBlock: " + block + " on " + blockLogInfo("#addStoredBlock: " + block + " on " + node + " size " + block.getNumBytes() + " but it does not belong to any file"); // we could add this block to invalidate set of this datanode. 
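The reportDiff(...) removed above, and re-added in ReportProcessor later in this patch, relies on a delimiter trick to find unreported replicas: a dummy block is inserted at the head of the datanode's block list, every reported block is moved in front of it, and whatever remains behind the delimiter afterwards was not reported. A toy, self-contained version using strings and a LinkedList (illustration only, not the patch's code):

import java.util.*;

class DelimiterTrickSketch {
  public static void main(String[] args) {
    LinkedList<String> blocksOnNode = new LinkedList<>(Arrays.asList("b1", "b2", "b3"));
    List<String> reported = Arrays.asList("b2", "b3");

    final String delimiter = "DELIMITER";          // stand-in for the dummy BlockInfo
    blocksOnNode.addFirst(delimiter);
    for (String b : reported) {
      if (blocksOnNode.remove(b)) {
        blocksOnNode.addFirst(b);                  // move reported block to the head
      }
    }
    // Everything still behind the delimiter was never reported.
    int delimIndex = blocksOnNode.indexOf(delimiter);
    List<String> toRemove =
        new ArrayList<>(blocksOnNode.subList(delimIndex + 1, blocksOnNode.size()));
    blocksOnNode.remove(delimiter);
    System.out.println("not reported: " + toRemove);  // [b1]
  }
}

The production code does the same walk over the block list threaded through BlockInfo, which is intended to make the move-to-head a constant-time pointer update rather than a list scan.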
@@ -2189,7 +2050,7 @@ private Block addStoredBlock(final BlockInfo block, } } else { curReplicaDelta = 0; - blockLog.warn("BLOCK* addStoredBlock: " + blockLogWarn("#addStoredBlock: " + "Redundant addStoredBlock request received for " + storedBlock + " on " + node + " size " + storedBlock.getNumBytes()); } @@ -2247,20 +2108,6 @@ private Block addStoredBlock(final BlockInfo block, return storedBlock; } - private void logAddStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) { - if (!blockLog.isInfoEnabled()) { - return; - } - - StringBuilder sb = new StringBuilder(500); - sb.append("BLOCK* addStoredBlock: blockMap updated: ") - .append(node) - .append(" is added to "); - storedBlock.appendStringTo(sb); - sb.append(" size " ) - .append(storedBlock.getNumBytes()); - blockLog.info(sb); - } /** * Invalidate corrupt replicas. *
@@ -3282,4 +3129,21 @@ enum MisReplicationResult { public void shutdown() { blocksMap.close(); } + + @Override // ReportProcessor + int moveBlockToHead(DatanodeDescriptor dn, BlockInfo storedBlock, + int curIndex, int headIndex) { + return dn.moveBlockToHead(storedBlock, curIndex, headIndex); + } + + @Override // ReportProcessor + boolean addBlock(DatanodeDescriptor dn, BlockInfo block) { + return dn.addBlock(block); + } + + @Override // ReportProcessor + boolean removeBlock(DatanodeDescriptor dn, BlockInfo block) { + return dn.removeBlock(block); + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationManager.java new file mode 100644 index 00000000000..fb269c7689d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationManager.java @@ -0,0 +1,595 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.blockmanagement; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.namenode.FSClusterStats; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.namenode.Namesystem; +import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; +import org.apache.hadoop.hdfs.util.LightWeightHashSet; +import org.apache.hadoop.util.Time; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; + +/** + * Analogue of the BlockManager class for cached replicas. Maintains the mapping + * of cached blocks to datanodes via processing datanode cache reports. Based on + * these reports and addition and removal of caching directives in the + * CacheManager, the CacheReplicationManager will schedule caching and uncaching + * work. 
+ * + * The CacheReplicationManager does not have a separate lock, so depends on + * taking the namesystem lock as appropriate. + */ +@InterfaceAudience.LimitedPrivate({"HDFS"}) +public class CacheReplicationManager extends ReportProcessor { + + private static final Log LOG = + LogFactory.getLog(CacheReplicationManager.class); + + // Statistics + private volatile long pendingCacheBlocksCount = 0L; + private volatile long underCachedBlocksCount = 0L; + private volatile long scheduledCacheBlocksCount = 0L; + + /** Used by metrics */ + public long getPendingCacheBlocksCount() { + return pendingCacheBlocksCount; + } + /** Used by metrics */ + public long getUnderCachedBlocksCount() { + return underCachedBlocksCount; + } + /** Used by metrics */ + public long getScheduledCacheBlocksCount() { + return scheduledCacheBlocksCount; + } + /** Used by metrics */ + public long getPendingBlocksToUncacheCount() { + return blocksToUncache.numBlocks(); + } + + private final Namesystem namesystem; + private final BlockManager blockManager; + private final DatanodeManager datanodeManager; + private final boolean isCachingEnabled; + + /** + * Mapping of blocks to datanodes where the block is cached + */ + final BlocksMap cachedBlocksMap; + /** + * Blocks to be uncached + */ + private final UncacheBlocks blocksToUncache; + /** + * Blocks that need to be cached + */ + private final LightWeightHashSet neededCacheBlocks; + /** + * Blocks that are being cached + */ + private final PendingReplicationBlocks pendingCacheBlocks; + + /** + * Executor for the CacheReplicationMonitor thread + */ + private ExecutorService monitor = null; + + private final Configuration conf; + + public CacheReplicationManager(final Namesystem namesystem, + final BlockManager blockManager, final DatanodeManager datanodeManager, + final FSClusterStats stats, final Configuration conf) throws IOException { + super(conf); + this.namesystem = namesystem; + this.blockManager = blockManager; + this.datanodeManager = datanodeManager; + this.conf = conf; + isCachingEnabled = conf.getBoolean( + DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY, + DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_DEFAULT); + if (isCachingEnabled) { + cachedBlocksMap = new BlocksMap(BlockManager.DEFAULT_MAP_LOAD_FACTOR); + blocksToUncache = new UncacheBlocks(); + pendingCacheBlocks = new PendingReplicationBlocks(1000 * conf.getInt( + DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, + DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT)); + neededCacheBlocks = new LightWeightHashSet(); + } else { + cachedBlocksMap = null; + blocksToUncache = null; + pendingCacheBlocks = null; + neededCacheBlocks = null; + } + } + + public void activate() { + if (isCachingEnabled) { + pendingCacheBlocks.start(); + this.monitor = Executors.newSingleThreadExecutor( + new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat(CacheReplicationMonitor.class.toString()) + .build()); + monitor.submit(new CacheReplicationMonitor(namesystem, blockManager, + datanodeManager, this, blocksToUncache, neededCacheBlocks, + pendingCacheBlocks, conf)); + monitor.shutdown(); + } + } + + public void close() { + if (isCachingEnabled) { + monitor.shutdownNow(); + try { + monitor.awaitTermination(3000, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + } + pendingCacheBlocks.stop(); + cachedBlocksMap.close(); + } + } + + public void clearQueues() { + blocksToUncache.clear(); + synchronized (neededCacheBlocks) { + neededCacheBlocks.clear(); + } + pendingCacheBlocks.clear(); 
+ } + + public boolean isCachingEnabled() { + return isCachingEnabled; + } + + /** + * @return desired cache replication factor of the block + */ + short getCacheReplication(Block block) { + final BlockCollection bc = blockManager.blocksMap.getBlockCollection(block); + return bc == null ? 0 : bc.getCacheReplication(); + } + + /** + * Returns the number of cached replicas of a block + */ + short getNumCached(Block block) { + Iterator it = cachedBlocksMap.nodeIterator(block); + short numCached = 0; + while (it.hasNext()) { + it.next(); + numCached++; + } + return numCached; + } + + /** + * The given datanode is reporting all of its cached blocks. + * Update the cache state of blocks in the block map. + */ + public void processCacheReport(final DatanodeID nodeID, final String poolId, + final BlockListAsLongs newReport) throws IOException { + if (!isCachingEnabled) { + String error = "cacheReport received from datanode " + nodeID + + " but caching is disabled on the namenode (" + + DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY + ")"; + LOG.warn(error + ", ignoring"); + throw new IOException(error); + } + namesystem.writeLock(); + final long startTime = Time.now(); //after acquiring write lock + final long endTime; + try { + final DatanodeDescriptor node = datanodeManager.getDatanode(nodeID); + if (node == null || !node.isAlive) { + throw new IOException( + "processCacheReport from dead or unregistered node: " + nodeID); + } + + // TODO: do an optimized initial cache report while in startup safemode + if (namesystem.isInStartupSafeMode()) { + blockLogInfo("#processCacheReport: " + + "discarded cache report from " + nodeID + + " because namenode still in startup phase"); + return; + } + + processReport(node, newReport); + + // TODO: process postponed blocks reported while a standby + //rescanPostponedMisreplicatedBlocks(); + } finally { + endTime = Time.now(); + namesystem.writeUnlock(); + } + + // Log the block report processing stats from Namenode perspective + final NameNodeMetrics metrics = NameNode.getNameNodeMetrics(); + if (metrics != null) { + metrics.addCacheBlockReport((int) (endTime - startTime)); + } + blockLogInfo("#processCacheReport: from " + + nodeID + ", blocks: " + newReport.getNumberOfBlocks() + + ", processing time: " + (endTime - startTime) + " msecs"); + } + + @Override // ReportProcessor + void markBlockAsCorrupt(BlockToMarkCorrupt b, DatanodeInfo dn) + throws IOException { + throw new UnsupportedOperationException("Corrupt blocks should not be in" + + " the cache report"); + } + + @Override // ReportProcessor + void addToInvalidates(final Block b, final DatanodeInfo node) { + blocksToUncache.add(b, node, true); + } + + @Override // ReportProcessor + void addStoredBlockUnderConstruction( + BlockInfoUnderConstruction storedBlock, DatanodeDescriptor node, + ReplicaState reportedState) { + throw new UnsupportedOperationException("Under-construction blocks" + + " should not be in the cache report"); + } + + @Override // ReportProcessor + int moveBlockToHead(DatanodeDescriptor dn, BlockInfo storedBlock, + int curIndex, int headIndex) { + return dn.moveCachedBlockToHead(storedBlock, curIndex, headIndex); + } + + @Override // ReportProcessor + boolean addBlock(DatanodeDescriptor dn, BlockInfo block) { + return dn.addCachedBlock(block); + } + + @Override // ReportProcessor + boolean removeBlock(DatanodeDescriptor dn, BlockInfo block) { + return dn.removeCachedBlock(block); + } + + /** + * Similar to processReportedBlock. 
Simpler since it doesn't need to worry + * about under construction and corrupt replicas. + * + * @return Updated BlockInfo for the block if it should be kept, null if + * it is to be invalidated. + */ + @Override // ReportProcessor + BlockInfo processReportedBlock(final DatanodeDescriptor dn, + final Block block, final ReplicaState reportedState, + final Collection toAdd, + final Collection toInvalidate, + Collection toCorrupt, + Collection toUC) { + + if (LOG.isDebugEnabled()) { + LOG.debug("Reported cached block " + block + + " on " + dn + " size " + block.getNumBytes() + + " replicaState = " + reportedState); + } + + final boolean shouldPostponeBlocksFromFuture = + blockManager.shouldPostponeBlocksFromFuture(); + if (shouldPostponeBlocksFromFuture && + namesystem.isGenStampInFuture(block)) { + // TODO: queuing cache operations on the standby + if (LOG.isTraceEnabled()) { + LOG.trace("processReportedBlock: block " + block + " has a " + + "genstamp from the future and namenode is in standby mode," + + " ignoring"); + } + return null; + } + + BlockInfo storedBlock = blockManager.blocksMap.getStoredBlock(block); + if (storedBlock == null) { + // If blocksMap does not contain reported block id, + // the BlockManager will take care of invalidating it, and the datanode + // will automatically uncache at that point. + if (LOG.isTraceEnabled()) { + LOG.trace("processReportedBlock: block " + block + " not found " + + "in blocksMap, ignoring"); + } + return null; + } + + BlockUCState ucState = storedBlock.getBlockUCState(); + + // Datanodes currently only will cache completed replicas. + // Let's just invalidate anything that's not completed and the right + // genstamp and number of bytes. + if (!ucState.equals(BlockUCState.COMPLETE) || + block.getGenerationStamp() != storedBlock.getGenerationStamp() || + block.getNumBytes() != storedBlock.getNumBytes()) { + if (shouldPostponeBlocksFromFuture) { + // TODO: queuing cache operations on the standby + if (LOG.isTraceEnabled()) { + LOG.trace("processReportedBlock: block " + block + " has a " + + "mismatching genstamp or length and namenode is in standby" + + " mode, ignoring"); + } + return null; + } else { + toInvalidate.add(block); + if (LOG.isTraceEnabled()) { + LOG.trace("processReportedBlock: block " + block + " scheduled" + + " for uncaching because it is misreplicated" + + " or under construction."); + } + return null; + } + } + + // It's a keeper + + // Could be present in blocksMap and not in cachedBlocksMap, add it + BlockInfo cachedBlock = cachedBlocksMap.getStoredBlock(block); + if (cachedBlock == null) { + cachedBlock = new BlockInfo(block, 0); + cachedBlocksMap.addBlockCollection(cachedBlock, + storedBlock.getBlockCollection()); + } + + if (LOG.isDebugEnabled()) { + LOG.debug("In memory blockUCState = " + ucState); + } + + // Ignore replicas that are already scheduled for removal + if (blocksToUncache.contains(dn.getStorageID(), block)) { + if (LOG.isTraceEnabled()) { + LOG.trace("processReportedBlock: block " + block + " is already" + + " scheduled to be uncached, not adding it to the cachedBlocksMap"); + } + return cachedBlock; + } + + // add replica if not already present in the cached block map + if (reportedState == ReplicaState.FINALIZED + && cachedBlock.findDatanode(dn) < 0) { + toAdd.add(cachedBlock); + } + if (LOG.isTraceEnabled()) { + LOG.trace("processReportedBlock: block " + block + " scheduled" + + " to be added to cachedBlocksMap"); + } + return cachedBlock; + } + + /** + * Modify (cached block-->datanode) map with a newly 
cached block. Remove + * block from set of needed cache replications if this takes care of the + * problem. + * + * @return the block that is stored in cachedBlockMap. + */ + @Override // ReportProcessor + Block addStoredBlock(final BlockInfo block, DatanodeDescriptor node, + DatanodeDescriptor delNodeHint, boolean logEveryBlock) throws IOException { + assert block != null && namesystem.hasWriteLock(); + BlockInfo cachedBlock = block; + if (cachedBlock == null || cachedBlock.getBlockCollection() == null) { + // If this block does not belong to anyfile, then we are done. + blockLogInfo("#addStoredBlock: " + block + " on " + + node + " size " + block.getNumBytes() + + " but it does not belong to any file"); + // we could add this block to invalidate set of this datanode. + // it will happen in next block report otherwise. + return block; + } + + BlockCollection bc = cachedBlock.getBlockCollection(); + + // add block to the datanode + boolean added = node.addCachedBlock(cachedBlock); + + int curReplicaDelta; + if (added) { + curReplicaDelta = 1; + if (logEveryBlock) { + logAddStoredBlock(cachedBlock, node); + } + } else { + curReplicaDelta = 0; + blockLogWarn("#addStoredBlock: " + + "Redundant addCachedBlock request received for " + cachedBlock + + " on " + node + " size " + cachedBlock.getNumBytes()); + } + + // Remove it from pending list if present + pendingCacheBlocks.decrement(block, node); + + // Now check for completion of blocks and safe block count + int numCachedReplicas = getNumCached(cachedBlock); + int numEffectiveCachedReplica = numCachedReplicas + + pendingCacheBlocks.getNumReplicas(cachedBlock); + + // if file is under construction, then done for now + if (bc instanceof MutableBlockCollection) { + return cachedBlock; + } + + // do not try to handle over/under-replicated blocks during first safe mode + if (!namesystem.isPopulatingReplQueues()) { + return cachedBlock; + } + + // Under-replicated + short cacheReplication = bc.getCacheReplication(); + if (numEffectiveCachedReplica >= cacheReplication) { + synchronized (neededCacheBlocks) { + neededCacheBlocks.remove(cachedBlock); + } + } else { + updateNeededCaching(cachedBlock, curReplicaDelta, 0); + } + + // Over-replicated, we don't need this new replica + if (numEffectiveCachedReplica > cacheReplication) { + blocksToUncache.add(cachedBlock, node, true); + } + + return cachedBlock; + } + + /** + * Modify (cached block-->datanode) map. Possibly generate replication tasks, + * if the removed block is still valid. + */ + @Override // ReportProcessor + void removeStoredBlock(Block block, DatanodeDescriptor node) { + blockLogDebug("#removeStoredBlock: " + block + " from " + node); + assert (namesystem.hasWriteLock()); + { + if (!cachedBlocksMap.removeNode(block, node)) { + blockLogDebug("#removeStoredBlock: " + + block + " has already been removed from node " + node); + return; + } + + // Prune the block from the map if it's the last cache replica + if (cachedBlocksMap.getStoredBlock(block).numNodes() == 0) { + cachedBlocksMap.removeBlock(block); + } + + // + // It's possible that the block was removed because of a datanode + // failure. If the block is still valid, check if replication is + // necessary. In that case, put block on a possibly-will- + // be-replicated list. + // + BlockCollection bc = blockManager.blocksMap.getBlockCollection(block); + if (bc != null) { + updateNeededCaching(block, -1, 0); + } + } + } + + /** + * Reduce cache replication factor to the new replication by randomly + * choosing replicas to invalidate. 
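The under-cached and over-cached decisions in addStoredBlock above reduce to a comparison between the desired cache replication and an "effective" count in which caching work already handed to datanodes is treated as if it had completed. A minimal arithmetic sketch with invented numbers (not patch code):

class CacheReplicationMathSketch {
  public static void main(String[] args) {
    short cacheReplication = 3;     // desired cached replicas for the file
    int numCachedReplicas = 1;      // replicas confirmed via cache reports
    int numPendingReplicas = 1;     // caching commands already sent but not yet reported
    int numEffective = numCachedReplicas + numPendingReplicas;

    if (numEffective < cacheReplication) {
      System.out.println("under-cached: keep the block on the needed-cache list");
    } else if (numEffective > cacheReplication) {
      System.out.println("over-cached: schedule an uncache for the extra replica");
    } else {
      System.out.println("satisfied: drop the block from the needed-cache list");
    }
  }
}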
+ */ + private void processOverCachedBlock(final Block block, + final short replication) { + assert namesystem.hasWriteLock(); + List nodes = getSafeReplicas(cachedBlocksMap, block); + List targets = + CacheReplicationPolicy.chooseTargetsToUncache(nodes, replication); + for (DatanodeDescriptor dn: targets) { + blocksToUncache.add(block, dn, true); + } + } + + /** Set replication for the blocks. */ + public void setCacheReplication(final short oldRepl, final short newRepl, + final String src, final Block... blocks) { + if (!isCachingEnabled) { + LOG.warn("Attempted to set cache replication for " + src + " but caching" + + " is disabled (" + DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY + + "), ignoring"); + return; + } + if (newRepl == oldRepl) { + return; + } + + // update needReplication priority queues + for (Block b : blocks) { + updateNeededCaching(b, 0, newRepl-oldRepl); + } + + if (oldRepl > newRepl) { + // old replication > the new one; need to remove copies + LOG.info("Decreasing cache replication from " + oldRepl + " to " + newRepl + + " for " + src); + for (Block b : blocks) { + processOverCachedBlock(b, newRepl); + } + } else { // replication factor is increased + LOG.info("Increasing cache replication from " + oldRepl + " to " + newRepl + + " for " + src); + } + } + + /** updates a block in under replicated queue */ + private void updateNeededCaching(final Block block, + final int curReplicasDelta, int expectedReplicasDelta) { + namesystem.writeLock(); + try { + if (!namesystem.isPopulatingReplQueues()) { + return; + } + final int numCached = getNumCached(block); + final int curExpectedReplicas = getCacheReplication(block); + if (numCached < curExpectedReplicas) { + neededCacheBlocks.add(block); + } else { + synchronized (neededCacheBlocks) { + neededCacheBlocks.remove(block); + } + } + } finally { + namesystem.writeUnlock(); + } + } + + /** + * Return the safely cached replicas of a block in a BlocksMap + */ + List getSafeReplicas(BlocksMap map, Block block) { + List nodes = new ArrayList(3); + Collection corrupted = + blockManager.corruptReplicas.getNodes(block); + Iterator it = map.nodeIterator(block); + while (it.hasNext()) { + DatanodeDescriptor dn = it.next(); + // Don't count a decommissioned or decommissioning nodes + if (dn.isDecommissioned() || dn.isDecommissionInProgress()) { + continue; + } + // Don't count a corrupted node + if (corrupted != null && corrupted.contains(dn)) { + continue; + } + nodes.add(dn); + } + return nodes; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java new file mode 100644 index 00000000000..ce70b3f677f --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java @@ -0,0 +1,302 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.blockmanagement; + +import static org.apache.hadoop.util.ExitUtil.terminate; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map.Entry; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.namenode.Namesystem; +import org.apache.hadoop.hdfs.util.LightWeightHashSet; + +/** + * Periodically computes new replication work. This consists of two tasks: + * + * 1) Assigning blocks in the neededCacheBlocks to datanodes where they will be + * cached. This moves them to the pendingCacheBlocks list. + * + * 2) Placing caching tasks in pendingCacheBlocks that have timed out + * back into neededCacheBlocks for reassignment. + */ +@InterfaceAudience.LimitedPrivate({"HDFS"}) +class CacheReplicationMonitor implements Runnable { + + private static final Log LOG = + LogFactory.getLog(CacheReplicationMonitor.class); + + private static final Log blockLog = NameNode.blockStateChangeLog; + + private final Namesystem namesystem; + private final BlockManager blockManager; + private final DatanodeManager datanodeManager; + private final CacheReplicationManager cacheReplManager; + + private final UncacheBlocks blocksToUncache; + private final LightWeightHashSet neededCacheBlocks; + private final PendingReplicationBlocks pendingCacheBlocks; + + /** + * Re-check period for computing cache replication work + */ + private final long cacheReplicationRecheckInterval; + + public CacheReplicationMonitor(Namesystem namesystem, + BlockManager blockManager, DatanodeManager datanodeManager, + CacheReplicationManager cacheReplManager, + UncacheBlocks blocksToUncache, + LightWeightHashSet neededCacheBlocks, + PendingReplicationBlocks pendingCacheBlocks, + Configuration conf) { + this.namesystem = namesystem; + this.blockManager = blockManager; + this.datanodeManager = datanodeManager; + this.cacheReplManager = cacheReplManager; + + this.blocksToUncache = blocksToUncache; + this.neededCacheBlocks = neededCacheBlocks; + this.pendingCacheBlocks = pendingCacheBlocks; + + this.cacheReplicationRecheckInterval = conf.getInt( + DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, + DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000L; + } + + @Override + public void run() { + LOG.info("CacheReplicationMonitor is starting"); + while (namesystem.isRunning()) { + try { + computeCachingWork(); + processPendingCachingWork(); + Thread.sleep(cacheReplicationRecheckInterval); + } catch (Throwable t) { + if (!namesystem.isRunning()) { + LOG.info("Stopping CacheReplicationMonitor."); + if (!(t instanceof InterruptedException)) { + LOG.info("CacheReplicationMonitor received an exception" + + " while shutting down.", t); + } + break; + } + LOG.fatal("ReplicationMonitor thread received Runtime exception. 
", t); + terminate(1, t); + } + } + } + + /** + * Assigns under-cached blocks to new datanodes. + */ + private void computeCachingWork() { + List blocksToCache = null; + namesystem.writeLock(); + try { + synchronized (neededCacheBlocks) { + blocksToCache = neededCacheBlocks.pollAll(); + } + } finally { + namesystem.writeUnlock(); + } + computeCachingWorkForBlocks(blocksToCache); + computeUncacheWork(); + } + + private void computeCachingWorkForBlocks(List blocksToCache) { + int requiredRepl, effectiveRepl, additionalRepl; + List cachedNodes, storedNodes, targets; + + final HashMap> work = + new HashMap>(); + namesystem.writeLock(); + try { + synchronized (neededCacheBlocks) { + for (Block block: blocksToCache) { + // Required number of cached replicas + requiredRepl = cacheReplManager.getCacheReplication(block); + // Replicas that are safely cached + cachedNodes = cacheReplManager.getSafeReplicas( + cacheReplManager.cachedBlocksMap, block); + // Replicas that are safely stored on disk + storedNodes = cacheReplManager.getSafeReplicas( + blockManager.blocksMap, block); + // "effective" replication factor which includes pending + // replication work + effectiveRepl = cachedNodes.size() + + pendingCacheBlocks.getNumReplicas(block); + if (effectiveRepl >= requiredRepl) { + neededCacheBlocks.remove(block); + blockLog.info("BLOCK* Removing " + block + + " from neededCacheBlocks as it has enough cached replicas"); + continue; + } + // Choose some replicas to cache if needed + additionalRepl = requiredRepl - effectiveRepl; + targets = new ArrayList(storedNodes); + // Only target replicas that aren't already cached. + for (DatanodeDescriptor dn: storedNodes) { + if (!cachedNodes.contains(dn)) { + targets.add(dn); + } + } + if (targets.size() < additionalRepl) { + if (LOG.isDebugEnabled()) { + LOG.debug("Block " + block + " cannot be cached on additional" + + " nodes because there are no more available datanodes" + + " with the block on disk."); + } + } + targets = CacheReplicationPolicy.chooseTargetsToCache(block, targets, + additionalRepl); + if (targets.size() < additionalRepl) { + if (LOG.isDebugEnabled()) { + LOG.debug("Block " + block + " cannot be cached on additional" + + " nodes because there is not sufficient cache space on" + + " available target datanodes."); + } + } + // Continue if we couldn't get more cache targets + if (targets.size() == 0) { + continue; + } + + // Update datanodes and blocks that were scheduled for caching + work.put(block, targets); + // Schedule caching on the targets + for (DatanodeDescriptor target: targets) { + target.addBlockToBeCached(block); + } + // Add block to the pending queue + pendingCacheBlocks.increment(block, + targets.toArray(new DatanodeDescriptor[] {})); + if (blockLog.isDebugEnabled()) { + blockLog.debug("BLOCK* block " + block + + " is moved from neededCacheBlocks to pendingCacheBlocks"); + } + // Remove from needed queue if it will be fully replicated + if (effectiveRepl + targets.size() >= requiredRepl) { + neededCacheBlocks.remove(block); + } + } + } + } finally { + namesystem.writeUnlock(); + } + + if (blockLog.isInfoEnabled()) { + // log which blocks have been scheduled for replication + for (Entry> item : work.entrySet()) { + Block block = item.getKey(); + List nodes = item.getValue(); + StringBuilder targetList = new StringBuilder("datanode(s)"); + for (DatanodeDescriptor node: nodes) { + targetList.append(' '); + targetList.append(node); + } + blockLog.info("BLOCK* ask " + targetList + " to cache " + block); + } + } + + if 
(blockLog.isDebugEnabled()) { + blockLog.debug( + "BLOCK* neededCacheBlocks = " + neededCacheBlocks.size() + + " pendingCacheBlocks = " + pendingCacheBlocks.size()); + } + } + + /** + * Reassign pending caching work that has timed out + */ + private void processPendingCachingWork() { + Block[] timedOutItems = pendingCacheBlocks.getTimedOutBlocks(); + if (timedOutItems != null) { + namesystem.writeLock(); + try { + for (int i = 0; i < timedOutItems.length; i++) { + Block block = timedOutItems[i]; + final short numCached = cacheReplManager.getNumCached(block); + final short cacheReplication = + cacheReplManager.getCacheReplication(block); + // Needs to be cached if under-replicated + if (numCached < cacheReplication) { + synchronized (neededCacheBlocks) { + neededCacheBlocks.add(block); + } + } + } + } finally { + namesystem.writeUnlock(); + } + } + } + + /** + * Schedule blocks for uncaching at datanodes + * @return total number of block for deletion + */ + int computeUncacheWork() { + final List nodes = blocksToUncache.getStorageIDs(); + int blockCnt = 0; + for (String node: nodes) { + blockCnt += uncachingWorkForOneNode(node); + } + return blockCnt; + } + + /** + * Gets the list of blocks scheduled for uncaching at a datanode and + * schedules them for uncaching. + * + * @return number of blocks scheduled for removal + */ + private int uncachingWorkForOneNode(String nodeId) { + final List toInvalidate; + final DatanodeDescriptor dn; + + namesystem.writeLock(); + try { + // get blocks to invalidate for the nodeId + assert nodeId != null; + dn = datanodeManager.getDatanode(nodeId); + if (dn == null) { + blocksToUncache.remove(nodeId); + return 0; + } + toInvalidate = blocksToUncache.invalidateWork(nodeId, dn); + if (toInvalidate == null) { + return 0; + } + } finally { + namesystem.writeUnlock(); + } + if (blockLog.isInfoEnabled()) { + blockLog.info("BLOCK* " + getClass().getSimpleName() + + ": ask " + dn + " to uncache " + toInvalidate); + } + return toInvalidate.size(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationPolicy.java new file mode 100644 index 00000000000..2674f6a0c77 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationPolicy.java @@ -0,0 +1,125 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.server.blockmanagement; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map.Entry; +import java.util.TreeMap; + +import org.apache.commons.math.random.RandomData; +import org.apache.commons.math.random.RandomDataImpl; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdfs.protocol.Block; + +/** + * Helper class used by the CacheReplicationManager and CacheReplicationMonitor + * to select datanodes where blocks should be cached or uncached. + */ +@InterfaceAudience.LimitedPrivate({"HDFS"}) +public class CacheReplicationPolicy { + + /** + * @return List of datanodes with sufficient capacity to cache the block + */ + private static List selectSufficientCapacity(Block block, + List targets) { + List sufficient = + new ArrayList(targets.size()); + for (DatanodeDescriptor dn: targets) { + long remaining = dn.getCacheRemaining(); + if (remaining >= block.getNumBytes()) { + sufficient.add(dn); + } + } + return sufficient; + } + + /** + * Returns a random datanode from targets, weighted by the amount of free + * cache capacity on the datanode. Prunes unsuitable datanodes from the + * targets list. + * + * @param block Block to be cached + * @param targets List of potential cache targets + * @return a random DN, or null if no datanodes are available or have enough + * cache capacity. + */ + private static DatanodeDescriptor randomDatanodeByRemainingCache(Block block, + List targets) { + // Hold a lottery biased by the amount of free space to decide + // who gets the block + Collections.shuffle(targets); + TreeMap lottery = + new TreeMap(); + long totalCacheAvailable = 0; + for (DatanodeDescriptor dn: targets) { + long remaining = dn.getCacheRemaining(); + totalCacheAvailable += remaining; + lottery.put(totalCacheAvailable, dn); + } + // Pick our lottery winner + RandomData r = new RandomDataImpl(); + long winningTicket = r.nextLong(0, totalCacheAvailable - 1); + Entry winner = lottery.higherEntry(winningTicket); + return winner.getValue(); + } + + /** + * Chooses numTargets new cache replicas for a block from a list of targets. + * Will return fewer targets than requested if not enough nodes are available. + * + * @return List of target datanodes + */ + static List chooseTargetsToCache(Block block, + List targets, int numTargets) { + List sufficient = + selectSufficientCapacity(block, targets); + List chosen = + new ArrayList(numTargets); + for (int i = 0; i < numTargets && !sufficient.isEmpty(); i++) { + chosen.add(randomDatanodeByRemainingCache(block, sufficient)); + } + return chosen; + } + + /** + * Given a list cache replicas where a block is cached, choose replicas to + * uncache to drop the cache replication factor down to replication. 
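randomDatanodeByRemainingCache above implements a lottery weighted by free cache capacity: cumulative capacity totals are keyed in a TreeMap, a ticket is drawn below the grand total, and higherEntry picks the first node whose cumulative total exceeds the ticket, so nodes with more free cache win proportionally more often. A dependency-free sketch of the same idea (the patch itself draws the ticket with commons-math's RandomDataImpl; the names and numbers below are invented):

import java.util.*;

class CacheLotterySketch {
  // Assumes freeCacheByNode is non-empty with positive capacities.
  static String pickWeighted(Map<String, Long> freeCacheByNode, Random rand) {
    TreeMap<Long, String> lottery = new TreeMap<>();
    long total = 0;
    for (Map.Entry<String, Long> e : freeCacheByNode.entrySet()) {
      total += e.getValue();
      lottery.put(total, e.getKey());                 // cumulative free capacity -> node
    }
    long ticket = (long) (rand.nextDouble() * total); // in [0, total)
    return lottery.higherEntry(ticket).getValue();
  }

  public static void main(String[] args) {
    Map<String, Long> nodes = new LinkedHashMap<>();
    nodes.put("dn1", 100L);   // hypothetical free cache bytes
    nodes.put("dn2", 900L);
    Random rand = new Random(42);
    int dn2Wins = 0;
    for (int i = 0; i < 1000; i++) {
      if (pickWeighted(nodes, rand).equals("dn2")) {
        dn2Wins++;
      }
    }
    System.out.println("dn2 chosen " + dn2Wins + "/1000 times");  // roughly 900
  }
}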
+ * + * @param nodes list of datanodes where the block is currently cached + * @param replication desired replication factor + * @return List of datanodes to uncache + */ + public static List chooseTargetsToUncache( + List nodes, short replication) { + final int effectiveReplication = nodes.size(); + List targets = + new ArrayList(effectiveReplication); + Collections.shuffle(nodes); + final int additionalTargetsNeeded = effectiveReplication - replication; + int chosen = 0; + while (chosen < additionalTargetsNeeded && !nodes.isEmpty()) { + targets.add(nodes.get(chosen)); + chosen++; + } + return targets; + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java index 713a156cc46..4fd06d3cd16 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java @@ -22,6 +22,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Queue; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.protocol.Block; @@ -30,6 +31,9 @@ import org.apache.hadoop.hdfs.util.LightWeightHashSet; import org.apache.hadoop.util.Time; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; + /** * This class extends the DatanodeInfo class with ephemeral information (eg * health, capacity, what blocks are associated with the Datanode) that is @@ -93,8 +97,24 @@ synchronized void clear() { } } + /** + * Head of the list of blocks on the datanode + */ private volatile BlockInfo blockList = null; + /** + * Number of blocks on the datanode + */ private int numBlocks = 0; + + /** + * Head of the list of cached blocks on the datanode + */ + private volatile BlockInfo cachedBlockList = null; + /** + * Number of cached blocks on the datanode + */ + private int numCachedBlocks = 0; + // isAlive == heartbeats.contains(this) // This is an optimization, because contains takes O(n) time on Arraylist public boolean isAlive = false; @@ -134,6 +154,12 @@ synchronized void clear() { /** A set of blocks to be invalidated by this datanode */ private LightWeightHashSet invalidateBlocks = new LightWeightHashSet(); + /** A queue of blocks to be cached by this datanode */ + private BlockQueue cacheBlocks = new BlockQueue(); + /** A set of blocks to be uncached by this datanode */ + private LightWeightHashSet blocksToUncache = + new LightWeightHashSet(); + /* Variables for maintaining number of blocks scheduled to be written to * this datanode. This count is approximate and might be slightly bigger * in case of errors (e.g. datanode does not report if an error occurs @@ -260,14 +286,57 @@ int moveBlockToHead(BlockInfo b, int curIndex, int headIndex) { return curIndex; } + /** + * Add block to the list of cached blocks on the data-node. + * @return true if block was successfully added, false if already present + */ + public boolean addCachedBlock(BlockInfo b) { + if (!b.addNode(this)) + return false; + // add to the head of the data-node list + cachedBlockList = b.listInsert(cachedBlockList, this); + numCachedBlocks++; + return true; + } + + /** + * Remove block from the list of cached blocks on the data-node. 
+ * @return true if block was successfully removed, false if not present + */ + public boolean removeCachedBlock(BlockInfo b) { + cachedBlockList = b.listRemove(cachedBlockList, this); + if (b.removeNode(this)) { + numCachedBlocks--; + return true; + } else { + return false; + } + } + + /** + * Move block to the head of the list of cached blocks on the data-node. + * @return the index of the head of the blockList + */ + int moveCachedBlockToHead(BlockInfo b, int curIndex, int headIndex) { + cachedBlockList = b.moveBlockToHead(cachedBlockList, this, curIndex, + headIndex); + return curIndex; + } + /** * Used for testing only * @return the head of the blockList */ + @VisibleForTesting protected BlockInfo getHead(){ return blockList; } + @VisibleForTesting + protected BlockInfo getCachedHead() { + return cachedBlockList; + } + /** * Replace specified old block with a new one in the DataNodeDescriptor. * @@ -290,7 +359,9 @@ public void resetBlocks() { setDfsUsed(0); setXceiverCount(0); this.blockList = null; + this.cachedBlockList = null; this.invalidateBlocks.clear(); + this.blocksToUncache.clear(); this.volumeFailures = 0; } @@ -300,12 +371,20 @@ public void clearBlockQueues() { this.recoverBlocks.clear(); this.replicateBlocks.clear(); } + synchronized(blocksToUncache) { + this.blocksToUncache.clear(); + this.cacheBlocks.clear(); + } } public int numBlocks() { return numBlocks; } + public int numCachedBlocks() { + return numCachedBlocks; + } + /** * Updates stats from datanode heartbeat. */ @@ -358,7 +437,11 @@ public void remove() { public Iterator getBlockIterator() { return new BlockIterator(this.blockList, this); } - + + public Iterator getCachedBlockIterator() { + return new BlockIterator(this.cachedBlockList, this); + } + /** * Store block replication work. */ @@ -367,6 +450,14 @@ void addBlockToBeReplicated(Block block, DatanodeDescriptor[] targets) { replicateBlocks.offer(new BlockTargetPair(block, targets)); } + /** + * Store block caching work. + */ + void addBlockToBeCached(Block block) { + assert(block != null); + cacheBlocks.offer(block); + } + /** * Store block recovery work. */ @@ -390,6 +481,18 @@ void addBlocksToBeInvalidated(List blocklist) { } } } + + /** + * Store block uncaching work. 
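The cacheBlocks queue and blocksToUncache set added to DatanodeDescriptor here follow the descriptor's existing pending-work pattern: the NameNode queues work on the descriptor, and the heartbeat path (see the DatanodeManager hunk that follows) drains the queues into DNA_CACHE and DNA_UNCACHE commands. A standalone sketch of that flow with invented names and plain block IDs:

import java.util.*;

class PendingCacheWorkSketch {
  private final Queue<Long> cacheBlocks = new ArrayDeque<>();
  private final Set<Long> blocksToUncache = new HashSet<>();

  void addBlockToBeCached(long blockId) { cacheBlocks.offer(blockId); }
  void addBlockToBeUncached(long blockId) { blocksToUncache.add(blockId); }

  // Called while building the heartbeat response for this datanode.
  List<String> buildCommands() {
    List<String> cmds = new ArrayList<>();
    if (!cacheBlocks.isEmpty()) {
      List<Long> toCache = new ArrayList<>(cacheBlocks);
      cacheBlocks.clear();
      cmds.add("DNA_CACHE " + toCache);
    }
    if (!blocksToUncache.isEmpty()) {
      cmds.add("DNA_UNCACHE " + new ArrayList<>(blocksToUncache));
      blocksToUncache.clear();
    }
    return cmds;
  }

  public static void main(String[] args) {
    PendingCacheWorkSketch dn = new PendingCacheWorkSketch();
    dn.addBlockToBeCached(101L);
    dn.addBlockToBeUncached(202L);
    System.out.println(dn.buildCommands());  // [DNA_CACHE [101], DNA_UNCACHE [202]]
  }
}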
+ */ + void addBlocksToBeUncached(List blocklist) { + assert(blocklist != null && blocklist.size() > 0); + synchronized (blocksToUncache) { + for (Block blk : blocklist) { + blocksToUncache.add(blk); + } + } + } /** * The number of work items that are pending to be replicated @@ -398,6 +501,13 @@ int getNumberOfBlocksToBeReplicated() { return replicateBlocks.size(); } + /** + * The number of pending cache work items + */ + int getNumberOfBlocksToBeCached() { + return cacheBlocks.size(); + } + /** * The number of block invalidation items that are pending to * be sent to the datanode @@ -407,11 +517,24 @@ int getNumberOfBlocksToBeInvalidated() { return invalidateBlocks.size(); } } - + + /** + * The number of pending uncache work items + */ + int getNumberOfBlocksToBeUncached() { + synchronized (blocksToUncache) { + return blocksToUncache.size(); + } + } + public List getReplicationCommand(int maxTransfers) { return replicateBlocks.poll(maxTransfers); } + public List getCacheBlocks() { + return cacheBlocks.poll(cacheBlocks.size()); + } + public BlockInfoUnderConstruction[] getLeaseRecoveryCommand(int maxTransfers) { List blocks = recoverBlocks.poll(maxTransfers); if(blocks == null) @@ -430,6 +553,17 @@ public Block[] getInvalidateBlocks(int maxblocks) { } } + /** + * Remove up to the maximum number of blocks to be uncached + */ + public Block[] getInvalidateCacheBlocks() { + synchronized (blocksToUncache) { + Block[] deleteList = blocksToUncache.pollToArray( + new Block[blocksToUncache.size()]); + return deleteList.length == 0 ? null : deleteList; + } + } + /** * @return Approximate number of blocks currently scheduled to be written * to this datanode. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 52858139001..af5bf697839 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -1230,6 +1230,19 @@ public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg, blockPoolId, blks)); } + // Check pending caching + List pendingCacheList = nodeinfo.getCacheBlocks(); + if (pendingCacheList != null) { + cmds.add(new BlockCommand(DatanodeProtocol.DNA_CACHE, blockPoolId, + pendingCacheList.toArray(new Block[] {}))); + } + // Check cached block invalidation + blks = nodeinfo.getInvalidateCacheBlocks(); + if (blks != null) { + cmds.add(new BlockCommand(DatanodeProtocol.DNA_UNCACHE, + blockPoolId, blks)); + } + blockManager.addKeyUpdateCommand(cmds, nodeinfo); // check for balancer bandwidth update diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java index 841ca41755f..4b4d38e7156 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import java.io.PrintWriter; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -35,24 +34,22 @@ * on the 
machine in question. */ @InterfaceAudience.Private -class InvalidateBlocks { +abstract class InvalidateBlocks { /** Mapping: StorageID -> Collection of Blocks */ private final Map> node2blocks = new TreeMap>(); /** The total number of blocks in the map. */ private long numBlocks = 0L; - private final DatanodeManager datanodeManager; - - InvalidateBlocks(final DatanodeManager datanodeManager) { - this.datanodeManager = datanodeManager; - } - /** @return the number of blocks to be invalidated . */ synchronized long numBlocks() { return numBlocks; } + synchronized int numStorages() { + return node2blocks.size(); + } + /** * @return true if the given storage has the given block listed for * invalidation. Blocks are compared including their generation stamps: @@ -111,22 +108,22 @@ synchronized void remove(final String storageID, final Block block) { } } - /** Print the contents to out. */ - synchronized void dump(final PrintWriter out) { - final int size = node2blocks.values().size(); - out.println("Metasave: Blocks " + numBlocks - + " waiting deletion from " + size + " datanodes."); - if (size == 0) { - return; + /** + * Polls up to limit blocks from the list of to-be-invalidated Blocks + * for a storage. + */ + synchronized List pollNumBlocks(final String storageId, final int limit) { + final LightWeightHashSet set = node2blocks.get(storageId); + if (set == null) { + return null; } - - for(Map.Entry> entry : node2blocks.entrySet()) { - final LightWeightHashSet blocks = entry.getValue(); - if (blocks.size() > 0) { - out.println(datanodeManager.getDatanode(entry.getKey())); - out.println(blocks); - } + List polledBlocks = set.pollN(limit); + // Remove the storage if the set is now empty + if (set.isEmpty()) { + remove(storageId); } + numBlocks -= polledBlocks.size(); + return polledBlocks; } /** @return a list of the storage IDs. */ @@ -134,26 +131,22 @@ synchronized List getStorageIDs() { return new ArrayList(node2blocks.keySet()); } - synchronized List invalidateWork( - final String storageId, final DatanodeDescriptor dn) { - final LightWeightHashSet set = node2blocks.get(storageId); - if (set == null) { - return null; - } - - // # blocks that can be sent in one message is limited - final int limit = datanodeManager.blockInvalidateLimit; - final List toInvalidate = set.pollN(limit); - - // If we send everything in this message, remove this node entry - if (set.isEmpty()) { - remove(storageId); - } - - dn.addBlocksToBeInvalidated(toInvalidate); - numBlocks -= toInvalidate.size(); - return toInvalidate; + /** + * Return the set of to-be-invalidated blocks for a storage. + */ + synchronized LightWeightHashSet getBlocks(String storageId) { + return node2blocks.get(storageId); } + + /** + * Schedules invalidation work associated with a storage at the corresponding + * datanode. 
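pollNumBlocks above is the piece of InvalidateBlocks that both subclasses now share: pull at most a limited number of queued blocks for one storage, drop the storage's entry once it is drained, and keep the global block counter in step. A self-contained sketch of the same pattern (invented names, plain longs instead of Block objects):

import java.util.*;

class PollNumBlocksSketch {
  private final Map<String, Deque<Long>> node2blocks = new TreeMap<>();
  private long numBlocks = 0;

  void add(String storageId, long blockId) {
    node2blocks.computeIfAbsent(storageId, k -> new ArrayDeque<>()).add(blockId);
    numBlocks++;
  }

  List<Long> pollNumBlocks(String storageId, int limit) {
    Deque<Long> queue = node2blocks.get(storageId);
    if (queue == null) {
      return null;
    }
    List<Long> polled = new ArrayList<>();
    while (polled.size() < limit && !queue.isEmpty()) {
      polled.add(queue.poll());
    }
    if (queue.isEmpty()) {
      node2blocks.remove(storageId);   // storage fully drained, drop its entry
    }
    numBlocks -= polled.size();        // keep the global counter consistent
    return polled;
  }

  public static void main(String[] args) {
    PollNumBlocksSketch s = new PollNumBlocksSketch();
    s.add("storage-1", 1L); s.add("storage-1", 2L); s.add("storage-1", 3L);
    System.out.println(s.pollNumBlocks("storage-1", 2));  // [1, 2]
    System.out.println(s.pollNumBlocks("storage-1", 2));  // [3]
    System.out.println(s.pollNumBlocks("storage-1", 2));  // null (entry removed)
  }
}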
+ * @param storageId Storage of blocks to be invalidated + * @param dn Datanode where invalidation work will be scheduled + * @return List of blocks scheduled for invalidation at the datanode + */ + abstract List invalidateWork(final String storageId, + final DatanodeDescriptor dn); synchronized void clear() { node2blocks.clear(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateStoredBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateStoredBlocks.java new file mode 100644 index 00000000000..23f3c68eea3 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateStoredBlocks.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.blockmanagement; + +import java.io.PrintWriter; +import java.util.List; + +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.util.LightWeightHashSet; + +/** + * Subclass of InvalidateBlocks used by the BlockManager to + * track blocks on each storage that are scheduled to be invalidated. + */ +public class InvalidateStoredBlocks extends InvalidateBlocks { + + private final DatanodeManager datanodeManager; + + InvalidateStoredBlocks(DatanodeManager datanodeManager) { + this.datanodeManager = datanodeManager; + } + + /** Print the contents to out. 
*/ + synchronized void dump(final PrintWriter out) { + final int size = numStorages(); + out.println("Metasave: Blocks " + numBlocks() + + " waiting deletion from " + size + " datanodes."); + if (size == 0) { + return; + } + + List storageIds = getStorageIDs(); + for (String storageId: storageIds) { + LightWeightHashSet blocks = getBlocks(storageId); + if (blocks != null && !blocks.isEmpty()) { + out.println(datanodeManager.getDatanode(storageId)); + out.println(blocks); + } + } + } + + @Override + synchronized List invalidateWork( + final String storageId, final DatanodeDescriptor dn) { + final List toInvalidate = pollNumBlocks(storageId, + datanodeManager.blockInvalidateLimit); + if (toInvalidate != null) { + dn.addBlocksToBeInvalidated(toInvalidate); + } + return toInvalidate; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java index 6b07b789341..4f304a1846d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java @@ -29,20 +29,27 @@ import java.util.Map; import org.apache.commons.logging.Log; +import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.util.Daemon; -/*************************************************** - * PendingReplicationBlocks does the bookkeeping of all - * blocks that are getting replicated. - * - * It does the following: - * 1) record blocks that are getting replicated at this instant. - * 2) a coarse grain timer to track age of replication request - * 3) a thread that periodically identifies replication-requests - * that never made it. - * - ***************************************************/ +/** + * PendingReplicationBlocks is used in the BlockManager to track blocks that are + * currently being replicated on disk and in the CacheReplicationManager to + * track blocks that are currently being cached. + * + *
+ * <p>
+ * PendingReplicationBlocks performs the following tasks:
+ * <ol>
+ * <li>tracks in-flight replication or caching requests for a block at target
+ * datanodes.</li>
+ * <li>identifies requests that have timed out and need to be rescheduled at a
+ * different datanode.</li>
+ * </ol>
+ */ +@InterfaceAudience.LimitedPrivate({"HDFS"}) class PendingReplicationBlocks { private static final Log LOG = BlockManager.LOG; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReportProcessor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReportProcessor.java new file mode 100644 index 00000000000..c32e5d1dc25 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReportProcessor.java @@ -0,0 +1,271 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.blockmanagement; + +import java.io.IOException; +import java.util.Collection; +import java.util.Iterator; +import java.util.LinkedList; + +import org.apache.commons.logging.Log; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.namenode.NameNode; + +import com.google.common.base.Preconditions; + +/** + * Handles common operations of processing a block report from a datanode, + * generating a diff of updates to the BlocksMap, and then feeding the diff + * to the subclass-implemented hooks. + */ +@InterfaceAudience.LimitedPrivate({"HDFS"}) +public abstract class ReportProcessor { + + static final Log blockLog = NameNode.blockStateChangeLog; + private final String className = getClass().getSimpleName(); + // Max number of blocks to log info about during a block report. 
+ final long maxNumBlocksToLog; + + void blockLogDebug(String message) { + if (blockLog.isDebugEnabled()) { + blockLog.info("BLOCK* " + className + message); + } + } + + void blockLogInfo(String message) { + if (blockLog.isInfoEnabled()) { + blockLog.info("BLOCK* " + className + message); + } + } + + void blockLogWarn(String message) { + blockLog.warn("BLOCK* " + className + message); + } + + void logAddStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) { + if (!blockLog.isInfoEnabled()) { + return; + } + StringBuilder sb = new StringBuilder(500); + sb.append("BLOCK* " + className + "#addStoredBlock: blockMap updated: ") + .append(node) + .append(" is added to "); + storedBlock.appendStringTo(sb); + sb.append(" size " ) + .append(storedBlock.getNumBytes()); + blockLog.info(sb); + } + + public ReportProcessor(Configuration conf) { + this.maxNumBlocksToLog = conf.getLong( + DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY, + DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT); + } + + /** + * Processes a block report from a datanode, updating the block to + * datanode mapping, adding new blocks and removing invalid ones. + * Also computes and queues new replication and invalidation work. + * @param node Datanode sending the block report + * @param report as list of longs + * @throws IOException + */ + final void processReport(final DatanodeDescriptor node, + final BlockListAsLongs report) throws IOException { + // Normal case: + // Modify the (block-->datanode) map, according to the difference + // between the old and new block report. + // + Collection toAdd = new LinkedList(); + Collection toRemove = new LinkedList(); + Collection toInvalidate = new LinkedList(); + Collection toCorrupt = new LinkedList(); + Collection toUC = new LinkedList(); + reportDiff(node, report, toAdd, toRemove, toInvalidate, toCorrupt, toUC); + + // Process the blocks on each queue + for (StatefulBlockInfo b : toUC) { + addStoredBlockUnderConstruction(b.storedBlock, node, b.reportedState); + } + for (Block b : toRemove) { + removeStoredBlock(b, node); + } + int numBlocksLogged = 0; + for (BlockInfo b : toAdd) { + addStoredBlock(b, node, null, numBlocksLogged < maxNumBlocksToLog); + numBlocksLogged++; + } + + if (numBlocksLogged > maxNumBlocksToLog) { + blockLogInfo("#processReport: logged" + + " info for " + maxNumBlocksToLog + + " of " + numBlocksLogged + " reported."); + } + for (Block b : toInvalidate) { + blockLogInfo("#processReport: " + + b + " on " + node + " size " + b.getNumBytes() + + " does not belong to any file"); + addToInvalidates(b, node); + } + for (BlockToMarkCorrupt b : toCorrupt) { + markBlockAsCorrupt(b, node); + } + } + + /** + * Compute the difference between the current state of the datanode in the + * BlocksMap and the new reported state, categorizing changes into + * different groups (e.g. new blocks to be added, blocks that were removed, + * blocks that should be invalidated, etc.). 
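As an aside before the reportDiff implementation below: the method finds blocks a datanode silently dropped by putting a delimiter block at the head of the node's block list, moving every block named in the report in front of that delimiter, and treating whatever is still behind it as unreported. The same idea, shown as a toy self-contained sketch over plain strings; the names and types here are hypothetical stand-ins, not the patch's BlockInfo machinery.

    import java.util.Arrays;
    import java.util.LinkedList;
    import java.util.List;

    public class ReportDiffSketch {
      // Blocks the namenode believes the node holds but that were absent
      // from the latest report; mirrors the delimiter trick in reportDiff.
      static List<String> unreported(List<String> believedStored,
                                     List<String> reportedNow) {
        LinkedList<String> list = new LinkedList<>(believedStored);
        final String delimiter = "__DELIMITER__";
        list.addFirst(delimiter);            // nothing has been reported yet
        for (String block : reportedNow) {
          if (list.remove(block)) {
            list.addFirst(block);            // move reported block ahead of the delimiter
          }
        }
        // Everything still behind the delimiter was never mentioned.
        return list.subList(list.indexOf(delimiter) + 1, list.size());
      }

      public static void main(String[] args) {
        System.out.println(unreported(
            Arrays.asList("blk_1", "blk_2", "blk_3"),
            Arrays.asList("blk_3", "blk_1")));   // prints [blk_2]
      }
    }

The production code performs the same partition in place on the intrusive per-datanode block list, so it needs no temporary collections.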
+ */ + private void reportDiff(DatanodeDescriptor dn, + BlockListAsLongs newReport, + Collection toAdd, // add to DatanodeDescriptor + Collection toRemove, // remove from DatanodeDescriptor + Collection toInvalidate, // should be removed from DN + Collection toCorrupt, // add to corrupt replicas list + Collection toUC) { // add to under-construction list + // place a delimiter in the list which separates blocks + // that have been reported from those that have not + BlockInfo delimiter = new BlockInfo(new Block(), 1); + boolean added = addBlock(dn, delimiter); + assert added : "Delimiting block cannot be present in the node"; + int headIndex = 0; //currently the delimiter is in the head of the list + int curIndex; + + if (newReport == null) { + newReport = new BlockListAsLongs(); + } + // scan the report and process newly reported blocks + BlockReportIterator itBR = newReport.getBlockReportIterator(); + while (itBR.hasNext()) { + Block iblk = itBR.next(); + ReplicaState iState = itBR.getCurrentReplicaState(); + BlockInfo storedBlock = processReportedBlock(dn, iblk, iState, + toAdd, toInvalidate, toCorrupt, toUC); + // move block to the head of the list + if (storedBlock != null && (curIndex = storedBlock.findDatanode(dn)) >= 0) { + headIndex = moveBlockToHead(dn, storedBlock, curIndex, headIndex); + } + } + // collect blocks that have not been reported + // all of them are next to the delimiter + Iterator it = new DatanodeDescriptor.BlockIterator( + delimiter.getNext(0), dn); + while (it.hasNext()) { + toRemove.add(it.next()); + } + removeBlock(dn, delimiter); + } + + // Operations on the blocks on a datanode + + abstract int moveBlockToHead(DatanodeDescriptor dn, BlockInfo storedBlock, + int curIndex, int headIndex); + + abstract boolean addBlock(DatanodeDescriptor dn, BlockInfo block); + + abstract boolean removeBlock(DatanodeDescriptor dn, BlockInfo block); + + // Cache report processing + + abstract BlockInfo processReportedBlock(DatanodeDescriptor dn, Block iblk, + ReplicaState iState, Collection toAdd, + Collection toInvalidate, Collection toCorrupt, + Collection toUC); + + // Hooks for processing the cache report diff + + abstract Block addStoredBlock(final BlockInfo block, + DatanodeDescriptor node, DatanodeDescriptor delNodeHint, + boolean logEveryBlock) throws IOException; + + abstract void removeStoredBlock(Block block, DatanodeDescriptor node); + + abstract void markBlockAsCorrupt(BlockToMarkCorrupt b, DatanodeInfo dn) + throws IOException; + + abstract void addToInvalidates(final Block b, final DatanodeInfo node); + + abstract void addStoredBlockUnderConstruction( + BlockInfoUnderConstruction storedBlock, DatanodeDescriptor node, + ReplicaState reportedState) throws IOException; + + /** + * BlockToMarkCorrupt is used to build the "toCorrupt" list, which is a + * list of blocks that should be considered corrupt due to a block report. + */ + static class BlockToMarkCorrupt { + /** The corrupted block in a datanode. */ + final BlockInfo corrupted; + /** The corresponding block stored in the BlockManager. */ + final BlockInfo stored; + /** The reason to mark corrupt. 
*/ + final String reason; + + BlockToMarkCorrupt(BlockInfo corrupted, BlockInfo stored, String reason) { + Preconditions.checkNotNull(corrupted, "corrupted is null"); + Preconditions.checkNotNull(stored, "stored is null"); + + this.corrupted = corrupted; + this.stored = stored; + this.reason = reason; + } + + BlockToMarkCorrupt(BlockInfo stored, String reason) { + this(stored, stored, reason); + } + + BlockToMarkCorrupt(BlockInfo stored, long gs, String reason) { + this(new BlockInfo(stored), stored, reason); + //the corrupted block in datanode has a different generation stamp + corrupted.setGenerationStamp(gs); + } + + @Override + public String toString() { + return corrupted + "(" + + (corrupted == stored? "same as stored": "stored=" + stored) + ")"; + } + } + + /** + * StatefulBlockInfo is used to build the "toUC" list, which is a list of + * updates to the information about under-construction blocks. + * Besides the block in question, it provides the ReplicaState + * reported by the datanode in the block report. + */ + static class StatefulBlockInfo { + final BlockInfoUnderConstruction storedBlock; + final ReplicaState reportedState; + + StatefulBlockInfo(BlockInfoUnderConstruction storedBlock, + ReplicaState reportedState) { + this.storedBlock = storedBlock; + this.reportedState = reportedState; + } + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UncacheBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UncacheBlocks.java new file mode 100644 index 00000000000..855b73feb96 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UncacheBlocks.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.blockmanagement; + +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdfs.protocol.Block; + +/** + * Subclass of InvalidateBlocks used by the CacheReplicationManager to + * track blocks on each storage that are scheduled to be uncached. 
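The UncacheBlocks class declared just below follows the same pattern as the InvalidateBlocks.invalidateWork override earlier in this patch: drain up to a limit of blocks queued against one storage and hand them to the datanode descriptor. A stripped-down sketch of that drain-per-storage pattern, using plain collections and hypothetical names rather than the HDFS classes:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Iterator;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    public class PerStorageQueueSketch {
      private final Map<String, Set<String>> pendingByStorage = new HashMap<>();

      void enqueue(String storageId, String block) {
        pendingByStorage.computeIfAbsent(storageId, k -> new HashSet<>()).add(block);
      }

      // Drain at most 'limit' queued blocks for one storage, in the spirit of
      // pollNumBlocks; returns null when nothing is queued for that storage.
      List<String> poll(String storageId, int limit) {
        Set<String> pending = pendingByStorage.get(storageId);
        if (pending == null) {
          return null;
        }
        List<String> polled = new ArrayList<>(Math.min(limit, pending.size()));
        Iterator<String> it = pending.iterator();
        while (it.hasNext() && polled.size() < limit) {
          polled.add(it.next());
          it.remove();
        }
        if (pending.isEmpty()) {
          pendingByStorage.remove(storageId);
        }
        return polled;
      }

      public static void main(String[] args) {
        PerStorageQueueSketch queue = new PerStorageQueueSketch();
        queue.enqueue("DS-1", "blk_1");
        queue.enqueue("DS-1", "blk_2");
        System.out.println(queue.poll("DS-1", 1).size());   // 1
      }
    }

The only visible difference between the two overrides in the patch is the limit (blockInvalidateLimit versus Integer.MAX_VALUE) and which per-datanode queue the drained blocks are appended to.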
+ */ +@InterfaceAudience.Private +public class UncacheBlocks extends InvalidateBlocks { + + UncacheBlocks() { + } + + @Override + synchronized List invalidateWork( + final String storageId, final DatanodeDescriptor dn) { + final List toInvalidate = pollNumBlocks(storageId, Integer.MAX_VALUE); + if (toInvalidate != null) { + dn.addBlocksToBeUncached(toInvalidate); + } + return toInvalidate; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java index 64ca7270754..3c01345a3fe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java @@ -552,10 +552,12 @@ private boolean processCommandFromActive(DatanodeCommand cmd, case DatanodeProtocol.DNA_CACHE: LOG.info("DatanodeCommand action: DNA_CACHE"); dn.getFSDataset().cache(bcmd.getBlockPoolId(), bcmd.getBlocks()); + dn.metrics.incrBlocksCached(bcmd.getBlocks().length); break; case DatanodeProtocol.DNA_UNCACHE: LOG.info("DatanodeCommand action: DNA_UNCACHE"); dn.getFSDataset().uncache(bcmd.getBlockPoolId(), bcmd.getBlocks()); + dn.metrics.incrBlocksUncached(bcmd.getBlocks().length); break; case DatanodeProtocol.DNA_SHUTDOWN: // TODO: DNA_SHUTDOWN appears to be unused - the NN never sends this command diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index 81207d37cbc..b4912881a05 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -449,11 +449,24 @@ DatanodeCommand cacheReport() throws IOException { DatanodeCommand cmd = null; long startTime = Time.monotonicNow(); if (startTime - lastCacheReport > dnConf.cacheReportInterval) { - // TODO: Implement me! 
+ if (LOG.isDebugEnabled()) { + LOG.debug("Sending cacheReport from service actor: " + this); + } + lastCacheReport = startTime; + String bpid = bpos.getBlockPoolId(); BlockListAsLongs blocks = dn.getFSDataset().getCacheReport(bpid); + long createTime = Time.monotonicNow(); + cmd = bpNamenode.cacheReport(bpRegistration, bpid, blocks.getBlockListAsLongs()); + long sendTime = Time.monotonicNow(); + long createCost = createTime - startTime; + long sendCost = sendTime - createTime; + dn.getMetrics().addCacheReport(sendCost); + LOG.info("CacheReport of " + blocks.getNumberOfBlocks() + + " blocks took " + createCost + " msec to generate and " + + sendCost + " msecs for RPC and NN processing"); } return cmd; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java index 3aafeadcdcb..5d7afc7ecfd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java @@ -23,6 +23,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY; @@ -114,9 +116,9 @@ public DNConf(Configuration conf) { DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT); this.blockReportInterval = conf.getLong(DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, - DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT); - this.cacheReportInterval = conf.getLong(DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT); + this.cacheReportInterval = conf.getLong(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, + DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT); long initBRDelay = conf.getLong( DFS_BLOCKREPORT_INITIAL_DELAY_KEY, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java index 938189a5d84..b0a3a8d77fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java @@ -105,10 +105,10 @@ boolean isCached(String bpid, Block block) { */ List getCachedBlocks(String bpid) { List blocks = new ArrayList(); - MappableBlock mapBlock = null; // ConcurrentHashMap iteration doesn't see latest updates, which is okay - for (Iterator it = cachedBlocks.values().iterator(); - it.hasNext(); mapBlock = it.next()) { + Iterator it = cachedBlocks.values().iterator(); + while (it.hasNext()) { + MappableBlock mapBlock = it.next(); if (mapBlock.getBlockPoolId().equals(bpid)) { blocks.add(mapBlock.getBlock()); } @@ -174,12 +174,15 @@ 
void uncacheBlock(String bpid, Block block) { mapBlock.getBlockPoolId().equals(bpid) && mapBlock.getBlock().equals(block)) { mapBlock.close(); - cachedBlocks.remove(mapBlock); + cachedBlocks.remove(block.getBlockId()); long bytes = mapBlock.getNumBytes(); long used = usedBytes.get(); while (!usedBytes.compareAndSet(used, used - bytes)) { used = usedBytes.get(); } + LOG.info("Successfully uncached block " + block); + } else { + LOG.info("Could not uncache block " + block + ": unknown block."); } } @@ -219,6 +222,7 @@ public void run() { used = usedBytes.get(); } } else { + LOG.info("Successfully cached block " + block.getBlock()); cachedBlocks.put(block.getBlock().getBlockId(), block); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java index a9237c59106..ffdb8e7cf86 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java @@ -57,6 +57,8 @@ public class DataNodeMetrics { @Metric MutableCounterLong blocksRemoved; @Metric MutableCounterLong blocksVerified; @Metric MutableCounterLong blockVerificationFailures; + @Metric MutableCounterLong blocksCached; + @Metric MutableCounterLong blocksUncached; @Metric MutableCounterLong readsFromLocalClient; @Metric MutableCounterLong readsFromRemoteClient; @Metric MutableCounterLong writesFromLocalClient; @@ -74,6 +76,7 @@ public class DataNodeMetrics { @Metric MutableRate replaceBlockOp; @Metric MutableRate heartbeats; @Metric MutableRate blockReports; + @Metric MutableRate cacheReports; @Metric MutableRate packetAckRoundTripTimeNanos; MutableQuantiles[] packetAckRoundTripTimeNanosQuantiles; @@ -151,6 +154,10 @@ public void addBlockReport(long latency) { blockReports.add(latency); } + public void addCacheReport(long latency) { + cacheReports.add(latency); + } + public void incrBlocksReplicated(int delta) { blocksReplicated.incr(delta); } @@ -175,6 +182,15 @@ public void incrBlocksVerified() { blocksVerified.incr(); } + + public void incrBlocksCached(int delta) { + blocksCached.incr(delta); + } + + public void incrBlocksUncached(int delta) { + blocksUncached.incr(delta); + } + public void addReadBlockOp(long latency) { readBlockOp.add(latency); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index 471defac1ce..5b82848015d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -26,9 +26,9 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; +import java.util.Map.Entry; import java.util.SortedMap; import java.util.TreeMap; -import java.util.Map.Entry; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -51,7 +51,7 @@ /** * The Cache Manager handles caching on DataNodes. 
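Stepping back briefly before the CacheManager changes that follow: the FsDatasetCache hunks above keep their usedBytes counter consistent without a lock by looping on AtomicLong.compareAndSet. A minimal sketch of that pattern, with hypothetical names rather than the FsDatasetCache fields:

    import java.util.concurrent.atomic.AtomicLong;

    public class CacheUsageSketch {
      private final AtomicLong usedBytes = new AtomicLong(0);

      // Give back 'bytes' of cache capacity: re-read and retry the
      // compare-and-set until no other thread has raced in between.
      void release(long bytes) {
        long used = usedBytes.get();
        while (!usedBytes.compareAndSet(used, used - bytes)) {
          used = usedBytes.get();
        }
      }

      public static void main(String[] args) {
        CacheUsageSketch usage = new CacheUsageSketch();
        usage.usedBytes.set(4096);
        usage.release(1024);
        System.out.println(usage.usedBytes.get());   // 3072
      }
    }

On current JDKs the same effect is available in one call as usedBytes.addAndGet(-bytes); the explicit loop simply makes the retry visible.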
*/ -final class CacheManager { +public final class CacheManager { public static final Log LOG = LogFactory.getLog(CacheManager.class); /** @@ -69,6 +69,12 @@ final class CacheManager { private final TreeMap entriesByDirective = new TreeMap(); + /** + * Cache entries, sorted by path + */ + private final TreeMap> entriesByPath = + new TreeMap>(); + /** * Cache pools, sorted by name. */ @@ -90,9 +96,14 @@ final class CacheManager { */ private final int maxListCacheDirectivesResponses; - CacheManager(FSDirectory dir, Configuration conf) { + final private FSNamesystem namesystem; + final private FSDirectory dir; + + CacheManager(FSNamesystem namesystem, FSDirectory dir, Configuration conf) { // TODO: support loading and storing of the CacheManager state clear(); + this.namesystem = namesystem; + this.dir = dir; maxListCachePoolsResponses = conf.getInt( DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT); @@ -104,6 +115,7 @@ final class CacheManager { synchronized void clear() { entriesById.clear(); entriesByDirective.clear(); + entriesByPath.clear(); cachePools.clear(); nextEntryId = 1; } @@ -131,7 +143,8 @@ private synchronized Fallible addDirective( try { directive.validate(); } catch (IOException ioe) { - LOG.info("addDirective " + directive + ": validation failed."); + LOG.info("addDirective " + directive + ": validation failed: " + + ioe.getClass().getName() + ": " + ioe.getMessage()); return new Fallible(ioe); } // Check if we already have this entry. @@ -152,8 +165,34 @@ private synchronized Fallible addDirective( } LOG.info("addDirective " + directive + ": added cache directive " + directive); + + // Success! + // First, add it to the various maps entriesByDirective.put(directive, entry); entriesById.put(entry.getEntryId(), entry); + String path = directive.getPath(); + List entryList = entriesByPath.get(path); + if (entryList == null) { + entryList = new ArrayList(1); + entriesByPath.put(path, entryList); + } + entryList.add(entry); + + // Next, set the path as cached in the namesystem + try { + INode node = dir.getINode(directive.getPath()); + if (node.isFile()) { + INodeFile file = node.asFile(); + // TODO: adjustable cache replication factor + namesystem.setCacheReplicationInt(directive.getPath(), + file.getBlockReplication()); + } + } catch (IOException ioe) { + LOG.info("addDirective " + directive +": failed to cache file: " + + ioe.getClass().getName() +": " + ioe.getMessage()); + return new Fallible(ioe); + } + return new Fallible(entry); } @@ -201,7 +240,31 @@ private synchronized Fallible removeEntry(long entryId, return new Fallible( new UnexpectedRemovePathBasedCacheEntryException(entryId)); } + // Remove the corresponding entry in entriesByPath. 
+ String path = existing.getDirective().getPath(); + List entries = entriesByPath.get(path); + if (entries == null || !entries.remove(existing)) { + return new Fallible( + new UnexpectedRemovePathBasedCacheEntryException(entryId)); + } + if (entries.size() == 0) { + entriesByPath.remove(path); + } entriesById.remove(entryId); + + // Set the path as uncached in the namesystem + try { + INode node = dir.getINode(existing.getDirective().getPath()); + if (node.isFile()) { + namesystem.setCacheReplicationInt(existing.getDirective().getPath(), + (short) 0); + } + } catch (IOException e) { + LOG.warn("removeEntry " + entryId + ": failure while setting cache" + + " replication factor", e); + return new Fallible(e); + } + LOG.info("removeEntry successful for PathCacheEntry id " + entryId); return new Fallible(entryId); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 9523a50a47d..1822764bebb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -1091,6 +1091,52 @@ Block[] unprotectedSetReplication(String src, short replication, return file.getBlocks(); } + /** + * Set cache replication for a file + * + * @param src file name + * @param replication new replication + * @param blockRepls block replications - output parameter + * @return array of file blocks + * @throws QuotaExceededException + * @throws SnapshotAccessControlException + */ + Block[] setCacheReplication(String src, short replication, short[] blockRepls) + throws QuotaExceededException, UnresolvedLinkException, + SnapshotAccessControlException { + waitForReady(); + writeLock(); + try { + return unprotectedSetCacheReplication(src, replication, blockRepls); + } finally { + writeUnlock(); + } + } + + Block[] unprotectedSetCacheReplication(String src, short replication, + short[] blockRepls) throws QuotaExceededException, + UnresolvedLinkException, SnapshotAccessControlException { + assert hasWriteLock(); + + final INodesInPath iip = rootDir.getINodesInPath4Write(src, true); + final INode inode = iip.getLastINode(); + if (inode == null || !inode.isFile()) { + return null; + } + INodeFile file = inode.asFile(); + final short oldBR = file.getCacheReplication(); + + // TODO: Update quotas here as repl goes up or down + file.setCacheReplication(replication); + final short newBR = file.getCacheReplication(); + + if (blockRepls != null) { + blockRepls[0] = oldBR; + blockRepls[1] = newBR; + } + return file.getBlocks(); + } + /** * @param path the file path * @return the block size of the file. 
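Before the FSNamesystem diff that follows, a self-contained sketch of the entriesByPath bookkeeping that the CacheManager hunks above maintain: a sorted map from path to the cache entries registered against it, where a bucket is created on first use and dropped once its last entry is removed. Types and identifiers here are simplified stand-ins, not the patch's PathBasedCacheEntry classes.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.TreeMap;

    public class EntriesByPathSketch {
      // Path -> ids of the cache entries registered for that path.
      private final TreeMap<String, List<Long>> entriesByPath = new TreeMap<>();

      void add(String path, Long entryId) {
        List<Long> entries = entriesByPath.get(path);
        if (entries == null) {
          entries = new ArrayList<>(1);
          entriesByPath.put(path, entries);
        }
        entries.add(entryId);
      }

      boolean remove(String path, Long entryId) {
        List<Long> entries = entriesByPath.get(path);
        if (entries == null || !entries.remove(entryId)) {
          return false;                       // unknown path or entry id
        }
        if (entries.isEmpty()) {
          entriesByPath.remove(path);         // drop the now-empty bucket
        }
        return true;
      }

      public static void main(String[] args) {
        EntriesByPathSketch index = new EntriesByPathSketch();
        index.add("/warm/data", 1L);
        index.add("/warm/data", 2L);
        System.out.println(index.remove("/warm/data", 1L));   // true
        System.out.println(index.remove("/cold/data", 3L));   // false
      }
    }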
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 720a5a96db1..8d41ca58104 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -367,6 +367,7 @@ private void logAuditEvent(boolean succeeded, private final BlockManager blockManager; private final SnapshotManager snapshotManager; private final CacheManager cacheManager; + private final CacheReplicationManager cacheReplicationManager; private final DatanodeStatistics datanodeStatistics; // Block pool ID used by this namenode @@ -694,7 +695,9 @@ public static FSNamesystem loadFromDisk(Configuration conf) this.dtSecretManager = createDelegationTokenSecretManager(conf); this.dir = new FSDirectory(fsImage, this, conf); this.snapshotManager = new SnapshotManager(dir); - this.cacheManager= new CacheManager(dir, conf); + this.cacheManager = new CacheManager(this, dir, conf); + this.cacheReplicationManager = new CacheReplicationManager(this, + blockManager, blockManager.getDatanodeManager(), this, conf); this.safeMode = new SafeModeInfo(conf); this.auditLoggers = initAuditLoggers(conf); this.isDefaultAuditLogger = auditLoggers.size() == 1 && @@ -871,6 +874,7 @@ void startCommonServices(Configuration conf, HAContext haContext) throws IOExcep getCompleteBlocksTotal()); setBlockTotal(); blockManager.activate(conf); + cacheReplicationManager.activate(); } finally { writeUnlock(); } @@ -887,6 +891,7 @@ void stopCommonServices() { writeLock(); try { if (blockManager != null) blockManager.close(); + if (cacheReplicationManager != null) cacheReplicationManager.close(); } finally { writeUnlock(); } @@ -917,7 +922,9 @@ void startActiveServices() throws IOException { blockManager.getDatanodeManager().markAllDatanodesStale(); blockManager.clearQueues(); blockManager.processAllPendingDNMessages(); - + + cacheReplicationManager.clearQueues(); + if (!isInSafeMode() || (isInSafeMode() && safeMode.isPopulatingReplQueues())) { LOG.info("Reprocessing replication and invalidation queues"); @@ -1910,6 +1917,42 @@ private boolean setReplicationInt(String src, final short replication) return isFile; } + boolean setCacheReplicationInt(String src, final short replication) + throws IOException { + final boolean isFile; + FSPermissionChecker pc = getPermissionChecker(); + checkOperation(OperationCategory.WRITE); + byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); + writeLock(); + try { + checkOperation(OperationCategory.WRITE); + if (isInSafeMode()) { + throw new SafeModeException("Cannot set replication for " + src, safeMode); + } + src = FSDirectory.resolvePath(src, pathComponents, dir); + if (isPermissionEnabled) { + checkPathAccess(pc, src, FsAction.WRITE); + } + + final short[] blockRepls = new short[2]; // 0: old, 1: new + final Block[] blocks = dir.setCacheReplication(src, replication, + blockRepls); + isFile = (blocks != null); + if (isFile) { + cacheReplicationManager.setCacheReplication(blockRepls[0], + blockRepls[1], src, blocks); + } + } finally { + writeUnlock(); + } + + getEditLog().logSync(); + if (isFile) { + logAuditEvent(true, "setReplication", src); + } + return isFile; + } + long getPreferredBlockSize(String filename) throws IOException, UnresolvedLinkException { FSPermissionChecker pc 
= getPermissionChecker(); @@ -6391,6 +6434,14 @@ public BlockManager getBlockManager() { public FSDirectory getFSDirectory() { return dir; } + /** @return the cache manager. */ + public CacheManager getCacheManager() { + return cacheManager; + } + /** @return the cache replication manager. */ + public CacheReplicationManager getCacheReplicationManager() { + return cacheReplicationManager; + } @Override // NameNodeMXBean public String getCorruptFiles() { @@ -6959,10 +7010,6 @@ public BatchedListEntries listCachePools(String prevKey) return results; } - public CacheManager getCacheManager() { - return cacheManager; - } - /** * Default AuditLogger implementation; used when no access logger is * defined in the config file. It can also be explicitly listed in the diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index 455d808a37f..15a7d2c8e8e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -104,6 +104,8 @@ static long combinePreferredBlockSize(long header, long blockSize) { private BlockInfo[] blocks; + private short cacheReplication = 0; + INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime, long atime, BlockInfo[] blklist, short replication, long preferredBlockSize) { super(id, name, permissions, mtime, atime); @@ -199,6 +201,18 @@ public final INodeFile setFileReplication(short replication, Snapshot latest, return nodeToUpdate; } + @Override + public void setCacheReplication(short cacheReplication) { + Preconditions.checkArgument(cacheReplication <= getBlockReplication(), + "Cannot set cache replication higher than block replication factor"); + this.cacheReplication = cacheReplication; + } + + @Override + public short getCacheReplication() { + return cacheReplication; + } + /** @return preferred block size (in bytes) of the file. 
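One more aside on the INodeFile hunk just above: cache replication is deliberately capped at the file's on-disk replication factor, enforced with a precondition rather than silent clamping. An isolated illustration of that guard, in the same Guava Preconditions style used elsewhere in this patch series; the class and field names here are hypothetical.

    import com.google.common.base.Preconditions;

    public class CacheReplicationGuard {
      private final short blockReplication;
      private short cacheReplication;

      CacheReplicationGuard(short blockReplication) {
        this.blockReplication = blockReplication;
      }

      void setCacheReplication(short cacheReplication) {
        Preconditions.checkArgument(cacheReplication <= blockReplication,
            "Cannot set cache replication higher than block replication factor");
        this.cacheReplication = cacheReplication;
      }

      short getCacheReplication() {
        return cacheReplication;
      }

      public static void main(String[] args) {
        CacheReplicationGuard guard = new CacheReplicationGuard((short) 3);
        guard.setCacheReplication((short) 2);                  // accepted
        System.out.println(guard.getCacheReplication());       // 2
        guard.setCacheReplication((short) 4);                  // IllegalArgumentException
      }
    }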
*/ @Override public long getPreferredBlockSize() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index a78befb82f3..33f7815c7fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -968,7 +968,14 @@ public DatanodeCommand cacheReport(DatanodeRegistration nodeReg, String poolId, long[] blocks) throws IOException { verifyRequest(nodeReg); BlockListAsLongs blist = new BlockListAsLongs(blocks); - namesystem.getBlockManager().processCacheReport(nodeReg, poolId, blist); + if (blockStateChangeLog.isDebugEnabled()) { + blockStateChangeLog.debug("*BLOCK* NameNode.cacheReport: " + + "from " + nodeReg + " " + blist.getNumberOfBlocks() + + " blocks"); + } + + namesystem.getCacheReplicationManager() + .processCacheReport(nodeReg, poolId, blist); return null; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java index d02186d34b6..a5d312dea70 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java @@ -79,6 +79,8 @@ public class NameNodeMetrics { MutableCounterLong transactionsBatchedInSync; @Metric("Block report") MutableRate blockReport; MutableQuantiles[] blockReportQuantiles; + @Metric("Cache report") MutableRate cacheReport; + MutableQuantiles[] cacheReportQuantiles; @Metric("Duration in SafeMode at startup") MutableGaugeInt safeModeTime; @Metric("Time loading FS Image at startup") MutableGaugeInt fsImageLoadTime; @@ -89,6 +91,7 @@ public class NameNodeMetrics { final int len = intervals.length; syncsQuantiles = new MutableQuantiles[len]; blockReportQuantiles = new MutableQuantiles[len]; + cacheReportQuantiles = new MutableQuantiles[len]; for (int i = 0; i < len; i++) { int interval = intervals[i]; @@ -98,6 +101,9 @@ public class NameNodeMetrics { blockReportQuantiles[i] = registry.newQuantiles( "blockReport" + interval + "s", "Block report", "ops", "latency", interval); + cacheReportQuantiles[i] = registry.newQuantiles( + "cacheReport" + interval + "s", + "Cache report", "ops", "latency", interval); } } @@ -227,6 +233,13 @@ public void addBlockReport(long latency) { } } + public void addCacheBlockReport(long latency) { + cacheReport.add(latency); + for (MutableQuantiles q : cacheReportQuantiles) { + q.add(latency); + } + } + public void setSafeModeTime(long elapsed) { safeModeTime.set((int) elapsed); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java new file mode 100644 index 00000000000..557a9bf91b7 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java @@ -0,0 +1,162 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more 
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.blockmanagement; + +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY; +import static org.junit.Assert.assertEquals; +import static org.junit.Assume.assumeTrue; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileSystemTestHelper; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; +import org.apache.hadoop.io.nativeio.NativeIO; +import org.apache.hadoop.util.Fallible; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +public class TestCacheReplicationManager { + + // Most Linux installs allow a default of 64KB locked memory + private static final long CACHE_CAPACITY = 64 * 1024; + private static final long BLOCK_SIZE = 4096; + + private static Configuration conf; + private static MiniDFSCluster cluster = null; + private static FileSystem fs; + private static NameNode nn; + private static NamenodeProtocols nnRpc; + private static CacheReplicationManager cacheReplManager; + final private static FileSystemTestHelper helper = new FileSystemTestHelper(); + private static Path rootDir; + + @Before + public void setUp() throws Exception { + + assumeTrue(NativeIO.isAvailable()); + + conf = new HdfsConfiguration(); + conf.setLong(DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); + conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, + CACHE_CAPACITY); + conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1); + conf.setBoolean(DFS_NAMENODE_CACHING_ENABLED_KEY, true); + conf.setLong(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 1000); + + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(1).build(); + cluster.waitActive(); + + fs = cluster.getFileSystem(); + nn = cluster.getNameNode(); + nnRpc = nn.getRpcServer(); + cacheReplManager = nn.getNamesystem().getCacheReplicationManager(); + rootDir = helper.getDefaultWorkingDirectory(fs); + } + + @After + public void tearDown() throws Exception { + if (fs != null) { + fs.close(); + } + if (cluster != 
null) { + cluster.shutdown(); + } + } + + private int countNumCachedBlocks() { + return cacheReplManager.cachedBlocksMap.size(); + } + + private void waitForExpectedNumCachedBlocks(final int expected) + throws Exception { + int actual = countNumCachedBlocks(); + while (expected != actual) { + Thread.sleep(500); + actual = countNumCachedBlocks(); + } + } + + @Test(timeout=60000) + public void testCachePaths() throws Exception { + // Create the pool + final String pool = "friendlyPool"; + nnRpc.addCachePool(new CachePoolInfo("friendlyPool")); + // Create some test files + final int numFiles = 3; + final int numBlocksPerFile = 2; + final List paths = new ArrayList(numFiles); + for (int i=0; i toAdd = + new ArrayList(); + toAdd.add(new PathBasedCacheDirective(paths.get(i), pool)); + List> fallibles = + nnRpc.addPathBasedCacheDirectives(toAdd); + assertEquals("Unexpected number of fallibles", + 1, fallibles.size()); + PathBasedCacheEntry entry = fallibles.get(0).get(); + PathBasedCacheDirective directive = entry.getDirective(); + assertEquals("Directive does not match requested path", paths.get(i), + directive.getPath()); + assertEquals("Directive does not match requested pool", pool, + directive.getPool()); + expected += numBlocksPerFile; + waitForExpectedNumCachedBlocks(expected); + } + // Uncache and check each path in sequence + RemoteIterator entries = + nnRpc.listPathBasedCacheEntries(0, null, null); + for (int i=0; i toRemove = new ArrayList(); + toRemove.add(entry.getEntryId()); + List> fallibles = nnRpc.removePathBasedCacheEntries(toRemove); + assertEquals("Unexpected number of fallibles", 1, fallibles.size()); + Long l = fallibles.get(0).get(); + assertEquals("Removed entryId does not match requested", + entry.getEntryId(), l.longValue()); + expected -= numBlocksPerFile; + waitForExpectedNumCachedBlocks(expected); + } + } +} From 50af34f778f9fde11ef5d209a1ba5a432cc9b48a Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Sat, 14 Sep 2013 00:05:29 +0000 Subject: [PATCH 19/51] HDFS-5201. NativeIO: consolidate getrlimit into NativeIO#getMemlockLimit. 
(Contributed by Colin Patrick McCabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1523153 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/hadoop/io/nativeio/NativeIO.java | 52 +++++-------------- .../org/apache/hadoop/io/nativeio/NativeIO.c | 51 +++++++----------- .../hadoop/io/nativeio/TestNativeIO.java | 2 +- .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 4 ++ .../hadoop/hdfs/server/datanode/DataNode.java | 2 +- .../hadoop/hdfs/TestDatanodeConfig.java | 6 ++- 6 files changed, 44 insertions(+), 73 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java index 96193eed035..3d6ce7b6c0c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java @@ -272,44 +272,6 @@ public static void munlock(ByteBuffer buffer, long len) munlock_native(buffer, len); } - /** - * Resource limit types copied from - */ - private static class ResourceLimit { - public static final int RLIMIT_CPU = 0; - public static final int RLIMIT_FSIZE = 1; - public static final int RLIMIT_DATA = 2; - public static final int RLIMIT_STACK = 3; - public static final int RLIMIT_CORE = 4; - public static final int RLIMIT_RSS = 5; - public static final int RLIMIT_NPROC = 6; - public static final int RLIMIT_NOFILE = 7; - public static final int RLIMIT_MEMLOCK = 8; - public static final int RLIMIT_AS = 9; - public static final int RLIMIT_LOCKS = 10; - public static final int RLIMIT_SIGPENDING = 11; - public static final int RLIMIT_MSGQUEUE = 12; - public static final int RLIMIT_NICE = 13; - public static final int RLIMIT_RTPRIO = 14; - public static final int RLIMIT_RTTIME = 15; - public static final int RLIMIT_NLIMITS = 16; - } - - static native String getrlimit(int limit) throws NativeIOException; - /** - * Returns the soft limit on the number of bytes that may be locked by the - * process in bytes (RLIMIT_MEMLOCK). - * - * See the getrlimit(2) man page for more information - * - * @return maximum amount of locked memory in bytes - */ - public static long getMemlockLimit() throws IOException { - assertCodeLoaded(); - String strLimit = getrlimit(ResourceLimit.RLIMIT_MEMLOCK); - return Long.parseLong(strLimit); - } - /** Linux only methods used for getOwner() implementation */ private static native long getUIDforFDOwnerforOwner(FileDescriptor fd) throws IOException; private static native String getUserName(long uid) throws IOException; @@ -563,6 +525,20 @@ public static boolean isAvailable() { /** Initialize the JNI method ID and class ID cache */ private static native void initNative(); + /** + * Get the maximum number of bytes that can be locked into memory at any + * given point. + * + * @return 0 if no bytes can be locked into memory; + * Long.MAX_VALUE if there is no limit; + * The number of bytes that can be locked into memory otherwise. + */ + public static long getMemlockLimit() { + return isAvailable() ? 
getMemlockLimit0() : 0; + } + + private static native long getMemlockLimit0(); + private static class CachedUid { final long timestamp; final String username; diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c index 56f0f71eb5a..59a5f476228 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c @@ -16,8 +16,6 @@ * limitations under the License. */ -#define _GNU_SOURCE - #include "org_apache_hadoop.h" #include "org_apache_hadoop_io_nativeio_NativeIO.h" @@ -28,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -414,36 +413,6 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_munlock_1native( } } -/** - * public static native String getrlimit( - * int resource); - * - * The "00024" in the function name is an artifact of how JNI encodes - * special characters. U+0024 is '$'. - */ -JNIEXPORT jstring JNICALL -Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_getrlimit( - JNIEnv *env, jclass clazz, - jint resource) -{ - jstring ret = NULL; - - struct rlimit rlim; - int rc = getrlimit((int)resource, &rlim); - if (rc != 0) { - throw_ioe(env, errno); - goto cleanup; - } - - // Convert soft limit into a string - char limit[17]; - int len = snprintf(&limit, 17, "%d", rlim.rlim_cur); - ret = (*env)->NewStringUTF(env,&limit); - -cleanup: - return ret; -} - #ifdef __FreeBSD__ static int toFreeBSDFlags(int flags) { @@ -1008,6 +977,24 @@ done: #endif } +JNIEXPORT jlong JNICALL +Java_org_apache_hadoop_io_nativeio_NativeIO_getMemlockLimit0( +JNIEnv *env, jclass clazz) +{ +#ifdef WINDOWS + return 0; +#else + struct rlimit rlim; + int rc = getrlimit(RLIMIT_MEMLOCK, &rlim); + if (rc != 0) { + throw_ioe(env, errno); + return 0; + } + return (rlim.rlim_cur == RLIM_INFINITY) ? + INT64_MAX : rlim.rlim_cur; +#endif +} + /** * vim: sw=2: ts=2: et: */ diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java index 917532e4bf8..144cb9c2c43 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java @@ -583,6 +583,6 @@ public void testMlock() throws Exception { @Test(timeout=10000) public void testGetMemlockLimit() throws Exception { assumeTrue(NativeIO.isAvailable()); - NativeIO.POSIX.getMemlockLimit(); + NativeIO.getMemlockLimit(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index 27f8c10dfe8..06efddb2765 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -46,3 +46,7 @@ HDFS-4949 (Unreleased) cache report. (Contributed by Colin Patrick McCabe) HDFS-5195. Prevent passing null pointer to mlock and munlock. (cnauroth) + + HDFS-5201. 
NativeIO: consolidate getrlimit into NativeIO#getMemlockLimit + (Contributed by Colin Patrick McCabe) + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 65a1c922b94..778820b0a82 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -745,7 +745,7 @@ void startDataNode(Configuration conf, " size (%s) is greater than zero and native code is not available.", DFS_DATANODE_MAX_LOCKED_MEMORY_KEY)); } - long ulimit = NativeIO.POSIX.getMemlockLimit(); + long ulimit = NativeIO.getMemlockLimit(); if (dnConf.maxLockedMemory > ulimit) { throw new RuntimeException(String.format( "Cannot start datanode because the configured max locked memory" + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java index f2166b74115..4bdcfee6357 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java @@ -113,11 +113,15 @@ private static String makeURI(String scheme, String host, String path) @Test(timeout=60000) public void testMemlockLimit() throws Exception { assumeTrue(NativeIO.isAvailable()); - final long memlockLimit = NativeIO.POSIX.getMemlockLimit(); + final long memlockLimit = NativeIO.getMemlockLimit(); Configuration conf = cluster.getConfiguration(0); // Try starting the DN with limit configured to the ulimit conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, memlockLimit); + if (memlockLimit == Long.MAX_VALUE) { + // Can't increase the memlock limit past the maximum. + return; + } DataNode dn = null; dn = DataNode.createDataNode(new String[]{}, conf); dn.shutdown(); From 68ec07cadef3b01db8ed668ea589774eb38417c0 Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Mon, 16 Sep 2013 05:35:25 +0000 Subject: [PATCH 20/51] HDFS-5197. Document dfs.cachereport.intervalMsec in hdfs-default.xml. Contributed by Chris Nauroth. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1523543 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 ++ .../src/main/resources/hdfs-default.xml | 33 ++++++++++++++++++- 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index 06efddb2765..c041cf4ed49 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -36,6 +36,9 @@ HDFS-4949 (Unreleased) HDFS-5053. NameNode should invoke DataNode APIs to coordinate caching. (Andrew Wang) + HDFS-5197. Document dfs.cachereport.intervalMsec in hdfs-default.xml. 
+ (cnauroth) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 6e7a53debfd..e09e2fdcdf0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -1419,6 +1419,17 @@ + + dfs.namenode.caching.enabled + false + + Set to true to enable block caching. This flag enables the NameNode to + maintain a mapping of cached blocks to DataNodes via processing DataNode + cache reports. Based on these reports and addition and removal of caching + directives, the NameNode will schedule caching and uncaching work. + + + dfs.datanode.max.locked.memory 0 @@ -1428,7 +1439,10 @@ (RLIMIT_MEMLOCK) must be set to at least this value, else the datanode will abort on startup. - By default, this parameter set to 0, which disables in-memory caching. + By default, this parameter is set to 0, which disables in-memory caching. + + If the native libraries are not available to the DataNode, this + configuration has no effect. @@ -1442,4 +1456,21 @@ + + dfs.cachereport.intervalMsec + 10000 + + Determines cache reporting interval in milliseconds. After this amount of + time, the DataNode sends a full report of its cache state to the NameNode. + The NameNode uses the cache report to update its map of cached blocks to + DataNode locations. + + This configuration has no effect if in-memory caching has been disabled by + setting dfs.datanode.max.locked.memory to 0 (which is the default). + + If the native libraries are not available to the DataNode, this + configuration has no effect. + + + From 85c203602993a946fb5f41eadf1cf1484a0ce686 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Mon, 16 Sep 2013 18:41:27 +0000 Subject: [PATCH 21/51] HDFS-5210. Fix some failing unit tests on HDFS-4949 branch. (Contributed by Andrew Wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1523754 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 2 ++ .../CacheReplicationManager.java | 13 +++++---- .../CacheReplicationMonitor.java | 2 +- .../CacheReplicationPolicy.java | 14 +++++---- .../hdfs/server/datanode/BPOfferService.java | 6 ---- .../hdfs/server/datanode/BPServiceActor.java | 15 +++------- .../hadoop/hdfs/server/datanode/DataNode.java | 1 - .../fsdataset/impl/MappableBlock.java | 2 +- .../hdfs/server/namenode/CacheManager.java | 2 ++ .../TestCacheReplicationManager.java | 29 ++++++++++++++++--- 10 files changed, 52 insertions(+), 34 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index c041cf4ed49..78bdc796b6a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -53,3 +53,5 @@ HDFS-4949 (Unreleased) HDFS-5201. NativeIO: consolidate getrlimit into NativeIO#getMemlockLimit (Contributed by Colin Patrick McCabe) + HDFS-5210. Fix some failing unit tests on HDFS-4949 branch. 
+ (Contributed by Andrew Wang) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationManager.java index fb269c7689d..d58f3081465 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationManager.java @@ -167,11 +167,13 @@ public void close() { } public void clearQueues() { - blocksToUncache.clear(); - synchronized (neededCacheBlocks) { - neededCacheBlocks.clear(); + if (isCachingEnabled) { + blocksToUncache.clear(); + synchronized (neededCacheBlocks) { + neededCacheBlocks.clear(); + } + pendingCacheBlocks.clear(); } - pendingCacheBlocks.clear(); } public boolean isCachingEnabled() { @@ -571,7 +573,8 @@ private void updateNeededCaching(final Block block, } /** - * Return the safely cached replicas of a block in a BlocksMap + * Return the safe replicas (not corrupt or decomissioning/decommissioned) of + * a block in a BlocksMap */ List getSafeReplicas(BlocksMap map, Block block) { List nodes = new ArrayList(3); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java index ce70b3f677f..b6255460a25 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java @@ -156,7 +156,7 @@ private void computeCachingWorkForBlocks(List blocksToCache) { } // Choose some replicas to cache if needed additionalRepl = requiredRepl - effectiveRepl; - targets = new ArrayList(storedNodes); + targets = new ArrayList(storedNodes.size()); // Only target replicas that aren't already cached. for (DatanodeDescriptor dn: storedNodes) { if (!cachedNodes.contains(dn)) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationPolicy.java index 2674f6a0c77..3bd19331ea3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationPolicy.java @@ -35,6 +35,9 @@ @InterfaceAudience.LimitedPrivate({"HDFS"}) public class CacheReplicationPolicy { + // Not thread-safe, but only accessed by the CacheReplicationMonitor + private static RandomData random = new RandomDataImpl(); + /** * @return List of datanodes with sufficient capacity to cache the block */ @@ -53,8 +56,7 @@ private static List selectSufficientCapacity(Block block, /** * Returns a random datanode from targets, weighted by the amount of free - * cache capacity on the datanode. Prunes unsuitable datanodes from the - * targets list. + * cache capacity on the datanode. 
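The weighted, lottery-style selection that this CacheReplicationPolicy javadoc describes (pick a datanode with probability proportional to its free cache capacity) can be illustrated with a small self-contained sketch: accumulate cumulative weights into a TreeMap, draw a ticket in [0, total), and take the first entry whose cumulative weight exceeds the ticket. Names here are hypothetical; the patch itself uses commons-math's RandomData rather than java.util.Random.

    import java.util.Map;
    import java.util.Random;
    import java.util.TreeMap;

    public class WeightedPickSketch {
      private final Random random = new Random();

      // Pick one key with probability proportional to its (positive) weight.
      <T> T pick(Map<T, Long> weightByTarget) {
        TreeMap<Long, T> lottery = new TreeMap<>();
        long total = 0;
        for (Map.Entry<T, Long> e : weightByTarget.entrySet()) {
          total += e.getValue();               // running cumulative weight
          lottery.put(total, e.getKey());      // upper end of this target's range
        }
        long ticket = (long) (random.nextDouble() * total);   // in [0, total)
        return lottery.higherEntry(ticket).getValue();        // first key > ticket
      }

      public static void main(String[] args) {
        WeightedPickSketch sketch = new WeightedPickSketch();
        Map<String, Long> freeCacheBytes = Map.of("dn1", 1024L, "dn2", 3072L);
        // dn2 is chosen roughly three times as often as dn1.
        System.out.println(sketch.pick(freeCacheBytes));
      }
    }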
* * @param block Block to be cached * @param targets List of potential cache targets @@ -75,8 +77,7 @@ private static DatanodeDescriptor randomDatanodeByRemainingCache(Block block, lottery.put(totalCacheAvailable, dn); } // Pick our lottery winner - RandomData r = new RandomDataImpl(); - long winningTicket = r.nextLong(0, totalCacheAvailable - 1); + long winningTicket = random.nextLong(0, totalCacheAvailable - 1); Entry winner = lottery.higherEntry(winningTicket); return winner.getValue(); } @@ -94,7 +95,10 @@ static List chooseTargetsToCache(Block block, List chosen = new ArrayList(numTargets); for (int i = 0; i < numTargets && !sufficient.isEmpty(); i++) { - chosen.add(randomDatanodeByRemainingCache(block, sufficient)); + DatanodeDescriptor choice = + randomDatanodeByRemainingCache(block, sufficient); + chosen.add(choice); + sufficient.remove(choice); } return chosen; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java index 3c01345a3fe..bc78eda828a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java @@ -368,12 +368,6 @@ void scheduleBlockReport(long delay) { } } - void scheduleCacheReport(long delay) { - for (BPServiceActor actor: bpServices) { - actor.scheduleCacheReport(delay); - } - } - /** * Ask each of the actors to report a bad block hosted on another DN. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index b4912881a05..b96292410e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -242,17 +242,6 @@ void scheduleBlockReport(long delay) { resetBlockReportTime = true; // reset future BRs for randomness } - void scheduleCacheReport(long delay) { - if (delay > 0) { - // Uniform random jitter by the delay - lastCacheReport = Time.monotonicNow() - - dnConf.cacheReportInterval - + DFSUtil.getRandom().nextInt(((int)delay)); - } else { // send at next heartbeat - lastCacheReport = lastCacheReport - dnConf.cacheReportInterval; - } - } - void reportBadBlocks(ExtendedBlock block) { if (bpRegistration == null) { return; @@ -445,6 +434,10 @@ DatanodeCommand blockReport() throws IOException { } DatanodeCommand cacheReport() throws IOException { + // If caching is disabled, do not send a cache report + if (dn.getFSDataset().getCacheCapacity() == 0) { + return null; + } // send cache report if timer has expired. 
DatanodeCommand cmd = null; long startTime = Time.monotonicNow(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 778820b0a82..71158881345 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -1916,7 +1916,6 @@ static StartupOption getStartupOption(Configuration conf) { public void scheduleAllBlockReport(long delay) { for(BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) { bpos.scheduleBlockReport(delay); - bpos.scheduleCacheReport(delay); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java index de0bcd35d7b..a2a9e6c5a2b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java @@ -225,7 +225,7 @@ public void verifyChecksum() throws IOException, ChecksumException { blockBuf.flip(); // Number of read chunks, including partial chunk at end int chunks = (bytesRead+bytesPerChecksum-1) / bytesPerChecksum; - checksumBuf.limit(chunks*bytesPerChecksum); + checksumBuf.limit(chunks*checksumSize); fillBuffer(metaChannel, checksumBuf); checksumBuf.flip(); checksum.verifyChunkedSums(blockBuf, checksumBuf, block.getBlockName(), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index 5b82848015d..945b4250382 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -186,6 +186,8 @@ private synchronized Fallible addDirective( // TODO: adjustable cache replication factor namesystem.setCacheReplicationInt(directive.getPath(), file.getBlockReplication()); + } else { + LOG.warn("Path " + directive.getPath() + " is not a file"); } } catch (IOException ioe) { LOG.info("addDirective " + directive +": failed to cache file: " + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java index 557a9bf91b7..8c7037c1e6b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java @@ -48,9 +48,11 @@ public class TestCacheReplicationManager { + private static final long BLOCK_SIZE = 512; + private static final int REPL_FACTOR = 3; + private static final int NUM_DATANODES = 4; // Most Linux installs allow a default of 64KB locked memory - private static final long CACHE_CAPACITY = 64 * 1024; - private static final long BLOCK_SIZE = 4096; + 
private static final long CACHE_CAPACITY = 64 * 1024 / NUM_DATANODES; private static Configuration conf; private static MiniDFSCluster cluster = null; @@ -75,7 +77,7 @@ public void setUp() throws Exception { conf.setLong(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 1000); cluster = new MiniDFSCluster.Builder(conf) - .numDataNodes(1).build(); + .numDataNodes(NUM_DATANODES).build(); cluster.waitActive(); fs = cluster.getFileSystem(); @@ -106,6 +108,25 @@ private void waitForExpectedNumCachedBlocks(final int expected) Thread.sleep(500); actual = countNumCachedBlocks(); } + waitForExpectedNumCachedReplicas(expected*REPL_FACTOR); + } + + private void waitForExpectedNumCachedReplicas(final int expected) + throws Exception { + BlocksMap cachedBlocksMap = cacheReplManager.cachedBlocksMap; + int actual = 0; + while (expected != actual) { + Thread.sleep(500); + nn.getNamesystem().readLock(); + try { + actual = 0; + for (BlockInfo b : cachedBlocksMap.getBlocks()) { + actual += cachedBlocksMap.numNodes(b); + } + } finally { + nn.getNamesystem().readUnlock(); + } + } } @Test(timeout=60000) @@ -114,7 +135,7 @@ public void testCachePaths() throws Exception { final String pool = "friendlyPool"; nnRpc.addCachePool(new CachePoolInfo("friendlyPool")); // Create some test files - final int numFiles = 3; + final int numFiles = 2; final int numBlocksPerFile = 2; final List paths = new ArrayList(numFiles); for (int i=0; i Date: Wed, 18 Sep 2013 20:43:40 +0000 Subject: [PATCH 22/51] HDFS-5213. Separate PathBasedCacheEntry and PathBasedCacheDirectiveWithId. Contributed by Colin Patrick McCabe. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1524561 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 + .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 6 +- .../hadoop/hdfs/DistributedFileSystem.java | 14 +- .../AddPathBasedCacheDirectiveException.java | 10 + .../hadoop/hdfs/protocol/ClientProtocol.java | 10 +- .../protocol/PathBasedCacheDescriptor.java | 77 ++++++++ .../protocol/PathBasedCacheDirective.java | 38 ++-- .../hdfs/protocol/PathBasedCacheEntry.java | 53 +++--- ...movePathBasedCacheDescriptorException.java | 79 ++++++++ ...amenodeProtocolServerSideTranslatorPB.java | 77 ++++---- .../ClientNamenodeProtocolTranslatorPB.java | 103 +++++----- .../hdfs/server/namenode/CacheManager.java | 179 +++++++++--------- .../hdfs/server/namenode/FSNamesystem.java | 22 +-- .../server/namenode/NameNodeRpcServer.java | 18 +- .../apache/hadoop/hdfs/tools/CacheAdmin.java | 18 +- .../main/proto/ClientNamenodeProtocol.proto | 23 +-- .../TestCacheReplicationManager.java | 15 +- .../namenode/TestPathBasedCacheRequests.java | 47 +++-- 18 files changed, 483 insertions(+), 309 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDescriptor.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathBasedCacheDescriptorException.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index 78bdc796b6a..a0d03857f2e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -39,6 +39,9 @@ HDFS-4949 (Unreleased) HDFS-5197. Document dfs.cachereport.intervalMsec in hdfs-default.xml. (cnauroth) + HDFS-5213. Separate PathBasedCacheEntry and PathBasedCacheDirectiveWithId. 
+ (Contributed by Colin Patrick McCabe) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 58d03ff26ba..960d80815cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -200,9 +200,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES = "dfs.namenode.list.cache.pools.num.responses"; public static final int DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT = 100; - public static final String DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES = - "dfs.namenode.list.cache.directives.num.responses"; - public static final int DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT = 100; + public static final String DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES = + "dfs.namenode.list.cache.descriptors.num.responses"; + public static final int DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES_DEFAULT = 100; // Whether to enable datanode's stale state detection and usage for reads public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY = "dfs.namenode.avoid.read.stale.datanode"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index c2cdcb0afaf..b6b412a6101 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -68,7 +68,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; @@ -1591,7 +1591,7 @@ public Boolean next(final FileSystem fs, final Path p) * PathBasedCache entry, or an IOException describing why the directive * could not be added. */ - public List> + public List> addPathBasedCacheDirective(List directives) throws IOException { return dfs.namenode.addPathBasedCacheDirectives(directives); @@ -1605,8 +1605,8 @@ public Boolean next(final FileSystem fs, final Path p) * ID, or an IOException describing why the ID could not be removed. */ public List> - removePathBasedCacheEntries(List ids) throws IOException { - return dfs.namenode.removePathBasedCacheEntries(ids); + removePathBasedCacheDescriptors(List ids) throws IOException { + return dfs.namenode.removePathBasedCacheDescriptors(ids); } /** @@ -1615,11 +1615,11 @@ public Boolean next(final FileSystem fs, final Path p) * * @param pool The cache pool to list, or null to list all pools. * @param path The path name to list, or null to list all paths. - * @return A RemoteIterator which returns PathBasedCacheEntry objects. + * @return A RemoteIterator which returns PathBasedCacheDescriptor objects. 
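The DistributedFileSystem methods above are the client-facing surface of this rename: directives go in, descriptors (a directive plus an entry ID) come back, and descriptors are listed and removed by ID. A rough usage sketch against this branch-era API, with method names taken from the diff, the generic parameters (stripped by the formatting above) reconstructed as Fallible<PathBasedCacheDescriptor> and Fallible<Long>, and a made-up path and pool name:

    import java.io.IOException;
    import java.util.Collections;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;
    import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
    import org.apache.hadoop.util.Fallible;

    public class CacheDescriptorUsageSketch {
      static void run(Configuration conf) throws IOException {
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

        // Ask the NameNode to cache one path in a (hypothetical) pool.
        List<Fallible<PathBasedCacheDescriptor>> added =
            dfs.addPathBasedCacheDirective(Collections.singletonList(
                new PathBasedCacheDirective("/warm/part-00000", "examplePool")));
        PathBasedCacheDescriptor descriptor = added.get(0).get(); // throws on failure

        // List what is cached in the pool, then remove our descriptor by ID.
        RemoteIterator<PathBasedCacheDescriptor> it =
            dfs.listPathBasedCacheDescriptors("examplePool", null);
        while (it.hasNext()) {
          System.out.println(it.next());
        }
        dfs.removePathBasedCacheDescriptors(
            Collections.singletonList(descriptor.getEntryId()));
      }
    }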
*/ - public RemoteIterator listPathBasedCacheEntries( + public RemoteIterator listPathBasedCacheDescriptors( String pool, String path) throws IOException { - return dfs.namenode.listPathBasedCacheEntries(0, pool, path); + return dfs.namenode.listPathBasedCacheDescriptors(0, pool, path); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathBasedCacheDirectiveException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathBasedCacheDirectiveException.java index 457984353b6..2a95a81996e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathBasedCacheDirectiveException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathBasedCacheDirectiveException.java @@ -75,6 +75,16 @@ public PoolWritePermissionDeniedError(PathBasedCacheDirective directive) { } } + public static class PathAlreadyExistsInPoolError + extends AddPathBasedCacheDirectiveException { + private static final long serialVersionUID = 1L; + + public PathAlreadyExistsInPoolError(PathBasedCacheDirective directive) { + super("path " + directive.getPath() + " already exists in pool " + + directive.getPool(), directive); + } + } + public static class UnexpectedAddPathBasedCacheDirectiveException extends AddPathBasedCacheDirectiveException { private static final long serialVersionUID = 1L; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index 50f645ed657..d33e7f94e1e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -1106,7 +1106,7 @@ public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, * could not be added. */ @AtMostOnce - public List> + public List> addPathBasedCacheDirectives(List directives) throws IOException; @@ -1118,7 +1118,7 @@ public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, * ID, or an IOException describing why the ID could not be removed. */ @AtMostOnce - public List> removePathBasedCacheEntries(List ids) + public List> removePathBasedCacheDescriptors(List ids) throws IOException; /** @@ -1126,13 +1126,13 @@ public List> removePathBasedCacheEntries(List ids) * from the server. * * @param prevId The last listed entry ID, or -1 if this is the first call to - * listPathBasedCacheEntries. + * listPathBasedCacheDescriptors. * @param pool The cache pool to list, or null to list all pools. * @param path The path name to list, or null to list all paths. - * @return A RemoteIterator which returns PathBasedCacheEntry objects. + * @return A RemoteIterator which returns PathBasedCacheDescriptor objects. 
*/ @Idempotent - public RemoteIterator listPathBasedCacheEntries(long prevId, + public RemoteIterator listPathBasedCacheDescriptors(long prevId, String pool, String path) throws IOException; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDescriptor.java new file mode 100644 index 00000000000..2d27942c373 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDescriptor.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.commons.lang.builder.EqualsBuilder; +import org.apache.commons.lang.builder.HashCodeBuilder; + +import com.google.common.base.Preconditions; + +/** + * A directive in a cache pool that includes an identifying ID number. + */ +@InterfaceStability.Evolving +@InterfaceAudience.Public +public final class PathBasedCacheDescriptor extends PathBasedCacheDirective { + private final long entryId; + + public PathBasedCacheDescriptor(long entryId, String path, String pool) { + super(path, pool); + Preconditions.checkArgument(entryId > 0); + this.entryId = entryId; + } + + public long getEntryId() { + return entryId; + } + + @Override + public boolean equals(Object o) { + if (o == null) { + return false; + } + if (getClass() != o.getClass()) { + return false; + } + PathBasedCacheDescriptor other = (PathBasedCacheDescriptor)o; + return new EqualsBuilder().append(entryId, other.entryId). + append(getPath(), other.getPath()). + append(getPool(), other.getPool()). + isEquals(); + } + + @Override + public int hashCode() { + return new HashCodeBuilder().append(entryId). + append(getPath()). + append(getPool()). + hashCode(); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("{ entryId:").append(entryId). + append(", path:").append(getPath()). + append(", pool:").append(getPool()). 
+ append(" }"); + return builder.toString(); + } +}; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java index c6ac9c8ed05..1f60616fc19 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java @@ -20,9 +20,11 @@ import java.io.IOException; import com.google.common.base.Preconditions; -import com.google.common.collect.ComparisonChain; +import org.apache.commons.lang.builder.EqualsBuilder; import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.EmptyPathError; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError; @@ -31,7 +33,9 @@ /** * A directive to add a path to a cache pool. */ -public class PathBasedCacheDirective implements Comparable { +@InterfaceStability.Evolving +@InterfaceAudience.Public +public class PathBasedCacheDirective { private final String path; private final String pool; @@ -76,26 +80,24 @@ public void validate() throws IOException { } @Override - public int compareTo(PathBasedCacheDirective rhs) { - return ComparisonChain.start(). - compare(pool, rhs.getPool()). - compare(path, rhs.getPath()). - result(); + public boolean equals(Object o) { + if (o == null) { + return false; + } + if (getClass() != o.getClass()) { + return false; + } + PathBasedCacheDirective other = (PathBasedCacheDirective)o; + return new EqualsBuilder().append(getPath(), other.getPath()). + append(getPool(), other.getPool()). + isEquals(); } @Override public int hashCode() { - return new HashCodeBuilder().append(path).append(pool).hashCode(); - } - - @Override - public boolean equals(Object o) { - try { - PathBasedCacheDirective other = (PathBasedCacheDirective)o; - return other.compareTo(this) == 0; - } catch (ClassCastException e) { - return false; - } + return new HashCodeBuilder().append(getPath()). + append(getPool()). + hashCode(); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java index 7640c903373..292c3f563c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java @@ -17,59 +17,54 @@ */ package org.apache.hadoop.hdfs.protocol; -import org.apache.commons.lang.builder.EqualsBuilder; -import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdfs.server.namenode.CachePool; import com.google.common.base.Preconditions; /** - * An entry in the NameNode's PathBasedCache. + * Represents an entry in the PathBasedCache on the NameNode. + * + * This is an implementation class, not part of the public API. 
*/ +@InterfaceAudience.Private public final class PathBasedCacheEntry { private final long entryId; - private final PathBasedCacheDirective directive; + private final String path; + private final CachePool pool; - public PathBasedCacheEntry(long entryId, PathBasedCacheDirective directive) { + public PathBasedCacheEntry(long entryId, String path, CachePool pool) { Preconditions.checkArgument(entryId > 0); this.entryId = entryId; - this.directive = directive; + Preconditions.checkNotNull(path); + this.path = path; + Preconditions.checkNotNull(pool); + this.pool = pool; } public long getEntryId() { return entryId; } - public PathBasedCacheDirective getDirective() { - return directive; + public String getPath() { + return path; } - @Override - public boolean equals(Object o) { - try { - PathBasedCacheEntry other = (PathBasedCacheEntry)o; - return new EqualsBuilder(). - append(this.entryId, other.entryId). - append(this.directive, other.directive). - isEquals(); - } catch (ClassCastException e) { - return false; - } - } - - @Override - public int hashCode() { - return new HashCodeBuilder(). - append(entryId). - append(directive). - hashCode(); + public CachePool getPool() { + return pool; } @Override public String toString() { StringBuilder builder = new StringBuilder(); builder.append("{ entryId:").append(entryId). - append(", directive:").append(directive.toString()). - append(" }"); + append(", path:").append(path). + append(", pool:").append(pool). + append(" }"); return builder.toString(); } + + public PathBasedCacheDescriptor getDescriptor() { + return new PathBasedCacheDescriptor(entryId, path, pool.getName()); + } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathBasedCacheDescriptorException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathBasedCacheDescriptorException.java new file mode 100644 index 00000000000..8ef8c80a5af --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathBasedCacheDescriptorException.java @@ -0,0 +1,79 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + +import java.io.IOException; + +import com.google.common.base.Preconditions; + +/** + * An exception which occurred when trying to remove a PathBasedCache entry. 
+ */ +public abstract class RemovePathBasedCacheDescriptorException extends IOException { + private static final long serialVersionUID = 1L; + + private final long entryId; + + public RemovePathBasedCacheDescriptorException(String description, long entryId) { + super(description); + this.entryId = entryId; + } + + public long getEntryId() { + return this.entryId; + } + + public final static class InvalidIdException + extends RemovePathBasedCacheDescriptorException { + private static final long serialVersionUID = 1L; + + public InvalidIdException(long entryId) { + super("invalid PathBasedCacheDescriptor id " + entryId, entryId); + } + } + + public final static class RemovePermissionDeniedException + extends RemovePathBasedCacheDescriptorException { + private static final long serialVersionUID = 1L; + + public RemovePermissionDeniedException(long entryId) { + super("permission denied when trying to remove " + + "PathBasedCacheDescriptor id " + entryId, entryId); + } + } + + public final static class NoSuchIdException + extends RemovePathBasedCacheDescriptorException { + private static final long serialVersionUID = 1L; + + public NoSuchIdException(long entryId) { + super("there is no PathBasedCacheDescriptor with id " + entryId, + entryId); + } + } + + public final static class UnexpectedRemovePathBasedCacheDescriptorException + extends RemovePathBasedCacheDescriptorException { + private static final long serialVersionUID = 1L; + + public UnexpectedRemovePathBasedCacheDescriptorException(long id) { + super("encountered an unexpected error when trying to " + + "remove PathBasedCacheDescriptor with id " + id, id); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index a58e3d95e6e..48ebd9095e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPathNameError; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PoolWritePermissionDeniedError; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PathAlreadyExistsInPoolError; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; @@ -40,10 +41,10 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.InvalidIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.NoSuchIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.RemovePermissionDeniedException; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.InvalidIdException; +import 
org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.NoSuchIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.RemovePermissionDeniedException; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto; @@ -115,9 +116,9 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheEntriesElementProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheEntriesRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheEntriesResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsElementProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto; @@ -131,9 +132,9 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheEntriesRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheEntriesResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheEntryErrorProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDescriptorsRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDescriptorsResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDescriptorErrorProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto; @@ -1048,13 +1049,13 @@ public AddPathBasedCacheDirectivesResponseProto addPathBasedCacheDirectives(RpcC PathBasedCacheDirectiveProto proto = request.getElements(i); input.add(new PathBasedCacheDirective(proto.getPath(), proto.getPool())); } - List> output = server.addPathBasedCacheDirectives(input); + List> output = server.addPathBasedCacheDirectives(input); AddPathBasedCacheDirectivesResponseProto.Builder builder = AddPathBasedCacheDirectivesResponseProto.newBuilder(); for 
(int idx = 0; idx < output.size(); idx++) { try { - PathBasedCacheEntry entry = output.get(idx).get(); - builder.addResults(entry.getEntryId()); + PathBasedCacheDescriptor directive = output.get(idx).get(); + builder.addResults(directive.getEntryId()); } catch (IOException ioe) { if (ioe.getCause() instanceof EmptyPathError) { builder.addResults(AddPathBasedCacheDirectiveErrorProto. @@ -1068,6 +1069,9 @@ public AddPathBasedCacheDirectivesResponseProto addPathBasedCacheDirectives(RpcC } else if (ioe.getCause() instanceof PoolWritePermissionDeniedError) { builder.addResults(AddPathBasedCacheDirectiveErrorProto. ADD_PERMISSION_DENIED_ERROR_VALUE); + } else if (ioe.getCause() instanceof PathAlreadyExistsInPoolError) { + builder.addResults(AddPathBasedCacheDirectiveErrorProto. + PATH_ALREADY_EXISTS_IN_POOL_ERROR_VALUE); } else { builder.addResults(AddPathBasedCacheDirectiveErrorProto. UNEXPECTED_ADD_ERROR_VALUE); @@ -1081,29 +1085,29 @@ public AddPathBasedCacheDirectivesResponseProto addPathBasedCacheDirectives(RpcC } @Override - public RemovePathBasedCacheEntriesResponseProto removePathBasedCacheEntries( - RpcController controller, RemovePathBasedCacheEntriesRequestProto request) + public RemovePathBasedCacheDescriptorsResponseProto removePathBasedCacheDescriptors( + RpcController controller, RemovePathBasedCacheDescriptorsRequestProto request) throws ServiceException { try { List> output = - server.removePathBasedCacheEntries(request.getElementsList()); - RemovePathBasedCacheEntriesResponseProto.Builder builder = - RemovePathBasedCacheEntriesResponseProto.newBuilder(); + server.removePathBasedCacheDescriptors(request.getElementsList()); + RemovePathBasedCacheDescriptorsResponseProto.Builder builder = + RemovePathBasedCacheDescriptorsResponseProto.newBuilder(); for (int idx = 0; idx < output.size(); idx++) { try { long id = output.get(idx).get(); builder.addResults(id); } catch (InvalidIdException ioe) { - builder.addResults(RemovePathBasedCacheEntryErrorProto. + builder.addResults(RemovePathBasedCacheDescriptorErrorProto. INVALID_CACHED_PATH_ID_ERROR_VALUE); } catch (NoSuchIdException ioe) { - builder.addResults(RemovePathBasedCacheEntryErrorProto. + builder.addResults(RemovePathBasedCacheDescriptorErrorProto. NO_SUCH_CACHED_PATH_ID_ERROR_VALUE); } catch (RemovePermissionDeniedException ioe) { - builder.addResults(RemovePathBasedCacheEntryErrorProto. + builder.addResults(RemovePathBasedCacheDescriptorErrorProto. REMOVE_PERMISSION_DENIED_ERROR_VALUE); } catch (IOException ioe) { - builder.addResults(RemovePathBasedCacheEntryErrorProto. + builder.addResults(RemovePathBasedCacheDescriptorErrorProto. UNEXPECTED_REMOVE_ERROR_VALUE); } } @@ -1114,31 +1118,32 @@ public RemovePathBasedCacheEntriesResponseProto removePathBasedCacheEntries( } @Override - public ListPathBasedCacheEntriesResponseProto listPathBasedCacheEntries( - RpcController controller, ListPathBasedCacheEntriesRequestProto request) + public ListPathBasedCacheDescriptorsResponseProto listPathBasedCacheDescriptors( + RpcController controller, ListPathBasedCacheDescriptorsRequestProto request) throws ServiceException { try { - RemoteIterator iter = - server.listPathBasedCacheEntries(request.getPrevId(), + RemoteIterator iter = + server.listPathBasedCacheDescriptors(request.getPrevId(), request.hasPool() ? request.getPool() : null, request.hasPath() ? 
request.getPath() : null); - ListPathBasedCacheEntriesResponseProto.Builder builder = - ListPathBasedCacheEntriesResponseProto.newBuilder(); + ListPathBasedCacheDescriptorsResponseProto.Builder builder = + ListPathBasedCacheDescriptorsResponseProto.newBuilder(); long prevId = 0; while (iter.hasNext()) { - PathBasedCacheEntry entry = iter.next(); + PathBasedCacheDescriptor directive = iter.next(); builder.addElements( - ListPathBasedCacheEntriesElementProto.newBuilder(). - setId(entry.getEntryId()). - setPath(entry.getDirective().getPath()). - setPool(entry.getDirective().getPool())); - prevId = entry.getEntryId(); + ListPathBasedCacheDescriptorsElementProto.newBuilder(). + setId(directive.getEntryId()). + setPath(directive.getPath()). + setPool(directive.getPool())); + prevId = directive.getEntryId(); } if (prevId == 0) { builder.setHasMore(false); } else { - iter = server.listPathBasedCacheEntries(prevId, request.getPool(), - request.getPath()); + iter = server.listPathBasedCacheDescriptors(prevId, + request.hasPool() ? request.getPool() : null, + request.hasPath() ? request.getPath() : null); builder.setHasMore(iter.hasNext()); } return builder.build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index ce59605c9a5..b389b026027 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -39,15 +39,17 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.EmptyPathError; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPathNameError; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PoolWritePermissionDeniedError; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PathAlreadyExistsInPoolError; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.UnexpectedAddPathBasedCacheDirectiveException; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.InvalidIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.NoSuchIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.RemovePermissionDeniedException; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.UnexpectedRemovePathBasedCacheEntryException; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.InvalidIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.NoSuchIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.RemovePermissionDeniedException; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.UnexpectedRemovePathBasedCacheDescriptorException; import 
org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; @@ -108,10 +110,10 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheEntriesElementProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheEntriesRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheEntriesRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheEntriesResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsElementProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto; @@ -121,9 +123,9 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheEntriesRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheEntriesResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheEntryErrorProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDescriptorsRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDescriptorsResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDescriptorErrorProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto; @@ -145,6 +147,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.hdfs.server.namenode.CacheManager; import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; import 
org.apache.hadoop.hdfs.server.namenode.SafeModeException; import org.apache.hadoop.io.EnumSetWritable; @@ -1027,13 +1030,19 @@ private static IOException addPathBasedCacheDirectivesError(long code, } else if (code == AddPathBasedCacheDirectiveErrorProto. INVALID_POOL_NAME_ERROR_VALUE) { return new InvalidPoolNameError(directive); + } else if (code == AddPathBasedCacheDirectiveErrorProto. + ADD_PERMISSION_DENIED_ERROR_VALUE) { + return new PoolWritePermissionDeniedError(directive); + } else if (code == AddPathBasedCacheDirectiveErrorProto. + PATH_ALREADY_EXISTS_IN_POOL_ERROR_VALUE) { + return new PathAlreadyExistsInPoolError(directive); } else { return new UnexpectedAddPathBasedCacheDirectiveException(directive); } } @Override - public List> addPathBasedCacheDirectives( + public List> addPathBasedCacheDirectives( List directives) throws IOException { try { AddPathBasedCacheDirectivesRequestProto.Builder builder = @@ -1047,17 +1056,18 @@ public List> addPathBasedCacheDirectives( AddPathBasedCacheDirectivesResponseProto result = rpcProxy.addPathBasedCacheDirectives(null, builder.build()); int resultsCount = result.getResultsCount(); - ArrayList> results = - new ArrayList>(resultsCount); + ArrayList> results = + new ArrayList>(resultsCount); for (int i = 0; i < resultsCount; i++) { PathBasedCacheDirective directive = directives.get(i); long code = result.getResults(i); if (code > 0) { - results.add(new Fallible( - new PathBasedCacheEntry(code, directive))); + results.add(new Fallible( + new PathBasedCacheDescriptor(code, + directive.getPath(), directive.getPool()))); } else { - results.add(new Fallible( - addPathBasedCacheDirectivesError(code, directive))); + results.add(new Fallible( + addPathBasedCacheDirectivesError(code, directive))); } } return results; @@ -1066,32 +1076,32 @@ public List> addPathBasedCacheDirectives( } } - private static IOException removePathBasedCacheEntriesError(long code, long id) { - if (code == RemovePathBasedCacheEntryErrorProto. + private static IOException removePathBasedCacheDescriptorsError(long code, long id) { + if (code == RemovePathBasedCacheDescriptorErrorProto. INVALID_CACHED_PATH_ID_ERROR_VALUE) { return new InvalidIdException(id); - } else if (code == RemovePathBasedCacheEntryErrorProto. + } else if (code == RemovePathBasedCacheDescriptorErrorProto. NO_SUCH_CACHED_PATH_ID_ERROR_VALUE) { return new NoSuchIdException(id); - } else if (code == RemovePathBasedCacheEntryErrorProto. + } else if (code == RemovePathBasedCacheDescriptorErrorProto. 
REMOVE_PERMISSION_DENIED_ERROR_VALUE) { return new RemovePermissionDeniedException(id); } else { - return new UnexpectedRemovePathBasedCacheEntryException(id); + return new UnexpectedRemovePathBasedCacheDescriptorException(id); } } @Override - public List> removePathBasedCacheEntries(List ids) + public List> removePathBasedCacheDescriptors(List ids) throws IOException { try { - RemovePathBasedCacheEntriesRequestProto.Builder builder = - RemovePathBasedCacheEntriesRequestProto.newBuilder(); + RemovePathBasedCacheDescriptorsRequestProto.Builder builder = + RemovePathBasedCacheDescriptorsRequestProto.newBuilder(); for (Long id : ids) { builder.addElements(id); } - RemovePathBasedCacheEntriesResponseProto result = - rpcProxy.removePathBasedCacheEntries(null, builder.build()); + RemovePathBasedCacheDescriptorsResponseProto result = + rpcProxy.removePathBasedCacheDescriptors(null, builder.build()); int resultsCount = result.getResultsCount(); ArrayList> results = new ArrayList>(resultsCount); @@ -1101,7 +1111,7 @@ public List> removePathBasedCacheEntries(List ids) results.add(new Fallible(code)); } else { results.add(new Fallible( - removePathBasedCacheEntriesError(code, ids.get(i)))); + removePathBasedCacheDescriptorsError(code, ids.get(i)))); } } return results; @@ -1111,20 +1121,19 @@ public List> removePathBasedCacheEntries(List ids) } private static class BatchedPathBasedCacheEntries - implements BatchedEntries { - private ListPathBasedCacheEntriesResponseProto response; + implements BatchedEntries { + private ListPathBasedCacheDescriptorsResponseProto response; - BatchedPathBasedCacheEntries(ListPathBasedCacheEntriesResponseProto response) { + BatchedPathBasedCacheEntries(ListPathBasedCacheDescriptorsResponseProto response) { this.response = response; } @Override - public PathBasedCacheEntry get(int i) { - ListPathBasedCacheEntriesElementProto elementProto = + public PathBasedCacheDescriptor get(int i) { + ListPathBasedCacheDescriptorsElementProto elementProto = response.getElements(i); - return new PathBasedCacheEntry(elementProto.getId(), - new PathBasedCacheDirective(elementProto.getPath(), - elementProto.getPool())); + return new PathBasedCacheDescriptor(elementProto.getId(), + elementProto.getPath(), elementProto.getPool()); } @Override @@ -1139,7 +1148,7 @@ public boolean hasMore() { } private class PathBasedCacheEntriesIterator - extends BatchedRemoteIterator { + extends BatchedRemoteIterator { private final String pool; private final String path; @@ -1150,20 +1159,20 @@ public PathBasedCacheEntriesIterator(long prevKey, String pool, String path) { } @Override - public BatchedEntries makeRequest( + public BatchedEntries makeRequest( Long nextKey) throws IOException { - ListPathBasedCacheEntriesResponseProto response; + ListPathBasedCacheDescriptorsResponseProto response; try { - ListPathBasedCacheEntriesRequestProto.Builder builder = - ListPathBasedCacheEntriesRequestProto.newBuilder().setPrevId(nextKey); + ListPathBasedCacheDescriptorsRequestProto.Builder builder = + ListPathBasedCacheDescriptorsRequestProto.newBuilder().setPrevId(nextKey); if (pool != null) { builder.setPool(pool); } if (path != null) { builder.setPath(path); } - ListPathBasedCacheEntriesRequestProto req = builder.build(); - response = rpcProxy.listPathBasedCacheEntries(null, req); + ListPathBasedCacheDescriptorsRequestProto req = builder.build(); + response = rpcProxy.listPathBasedCacheDescriptors(null, req); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } @@ -1171,13 +1180,13 @@ 
public BatchedEntries makeRequest( } @Override - public Long elementToPrevKey(PathBasedCacheEntry element) { + public Long elementToPrevKey(PathBasedCacheDescriptor element) { return element.getEntryId(); } } @Override - public RemoteIterator listPathBasedCacheEntries(long prevId, + public RemoteIterator listPathBasedCacheDescriptors(long prevId, String pool, String path) throws IOException { return new PathBasedCacheEntriesIterator(prevId, pool, path); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index 945b4250382..943ed507982 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -19,8 +19,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES_DEFAULT; import java.io.IOException; import java.util.ArrayList; @@ -38,14 +38,15 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.UnexpectedAddPathBasedCacheDirectiveException; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PoolWritePermissionDeniedError; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.InvalidIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.NoSuchIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.UnexpectedRemovePathBasedCacheEntryException; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.RemovePermissionDeniedException; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.InvalidIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.NoSuchIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.UnexpectedRemovePathBasedCacheDescriptorException; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.RemovePermissionDeniedException; import org.apache.hadoop.util.Fallible; /** @@ -57,18 +58,12 @@ public final class CacheManager { /** * Cache entries, sorted by ID. * - * listPathBasedCacheEntries relies on the ordering of elements in this map + * listPathBasedCacheDescriptors relies on the ordering of elements in this map * to track what has already been listed by the client. 
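The comment above captures the listing RPC's paging contract: entries live in a TreeMap keyed by ID, and each batch resumes just past the last ID the client has already seen. A minimal sketch of that cursor pattern with plain JDK types (the batch size and String payload are placeholders):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.TreeMap;

    public class IdCursorPagingSketch {
      private final TreeMap<Long, String> entriesById = new TreeMap<Long, String>();
      private static final int MAX_BATCH = 100;

      /** Returns up to MAX_BATCH entries with IDs strictly greater than prevId. */
      synchronized List<String> listAfter(long prevId) {
        List<String> batch = new ArrayList<String>(MAX_BATCH);
        // tailMap(fromKey) is inclusive, so start one past the cursor.
        for (Map.Entry<Long, String> e : entriesById.tailMap(prevId + 1).entrySet()) {
          if (batch.size() >= MAX_BATCH) {
            break;  // more remains; the caller re-issues with the last ID it saw
          }
          batch.add(e.getValue());
        }
        return batch;
      }
    }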
*/ private final TreeMap entriesById = new TreeMap(); - /** - * Cache entries, sorted by directive. - */ - private final TreeMap entriesByDirective = - new TreeMap(); - /** * Cache entries, sorted by path */ @@ -94,7 +89,7 @@ public final class CacheManager { /** * Maximum number of cache pool directives to list in one operation. */ - private final int maxListCacheDirectivesResponses; + private final int maxListCacheDescriptorsResponses; final private FSNamesystem namesystem; final private FSDirectory dir; @@ -107,14 +102,13 @@ public final class CacheManager { maxListCachePoolsResponses = conf.getInt( DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT); - maxListCacheDirectivesResponses = conf.getInt( - DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, - DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT); + maxListCacheDescriptorsResponses = conf.getInt( + DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES, + DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES_DEFAULT); } synchronized void clear() { entriesById.clear(); - entriesByDirective.clear(); entriesByPath.clear(); cachePools.clear(); nextEntryId = 1; @@ -127,17 +121,32 @@ synchronized long getNextEntryId() throws IOException { return nextEntryId++; } - private synchronized Fallible addDirective( + private synchronized PathBasedCacheEntry + findEntry(PathBasedCacheDirective directive) { + List existing = + entriesByPath.get(directive.getPath()); + if (existing == null) { + return null; + } + for (PathBasedCacheEntry entry : existing) { + if (entry.getPool().getName().equals(directive.getPool())) { + return entry; + } + } + return null; + } + + private synchronized Fallible addDirective( PathBasedCacheDirective directive, FSPermissionChecker pc) { CachePool pool = cachePools.get(directive.getPool()); if (pool == null) { LOG.info("addDirective " + directive + ": pool not found."); - return new Fallible( + return new Fallible( new InvalidPoolNameError(directive)); } if ((pc != null) && (!pc.checkPermission(pool, FsAction.WRITE))) { LOG.info("addDirective " + directive + ": write permission denied."); - return new Fallible( + return new Fallible( new PoolWritePermissionDeniedError(directive)); } try { @@ -145,22 +154,24 @@ private synchronized Fallible addDirective( } catch (IOException ioe) { LOG.info("addDirective " + directive + ": validation failed: " + ioe.getClass().getName() + ": " + ioe.getMessage()); - return new Fallible(ioe); + return new Fallible(ioe); } + // Check if we already have this entry. - PathBasedCacheEntry existing = entriesByDirective.get(directive); + PathBasedCacheEntry existing = findEntry(directive); if (existing != null) { - // Entry already exists: return existing entry. LOG.info("addDirective " + directive + ": there is an " + - "existing directive " + existing); - return new Fallible(existing); + "existing directive " + existing + " in this pool."); + return new Fallible( + existing.getDescriptor()); } // Add a new entry with the next available ID. PathBasedCacheEntry entry; try { - entry = new PathBasedCacheEntry(getNextEntryId(), directive); + entry = new PathBasedCacheEntry(getNextEntryId(), + directive.getPath(), pool); } catch (IOException ioe) { - return new Fallible( + return new Fallible( new UnexpectedAddPathBasedCacheDirectiveException(directive)); } LOG.info("addDirective " + directive + ": added cache directive " @@ -168,7 +179,6 @@ private synchronized Fallible addDirective( // Success! 
// First, add it to the various maps - entriesByDirective.put(directive, entry); entriesById.put(entry.getEntryId(), entry); String path = directive.getPath(); List entryList = entriesByPath.get(path); @@ -181,7 +191,7 @@ private synchronized Fallible addDirective( // Next, set the path as cached in the namesystem try { INode node = dir.getINode(directive.getPath()); - if (node.isFile()) { + if (node != null && node.isFile()) { INodeFile file = node.asFile(); // TODO: adjustable cache replication factor namesystem.setCacheReplicationInt(directive.getPath(), @@ -192,96 +202,90 @@ private synchronized Fallible addDirective( } catch (IOException ioe) { LOG.info("addDirective " + directive +": failed to cache file: " + ioe.getClass().getName() +": " + ioe.getMessage()); - return new Fallible(ioe); + return new Fallible(ioe); } - - return new Fallible(entry); + return new Fallible( + entry.getDescriptor()); } - public synchronized List> addDirectives( + public synchronized List> addDirectives( List directives, FSPermissionChecker pc) { - ArrayList> results = - new ArrayList>(directives.size()); + ArrayList> results = + new ArrayList>(directives.size()); for (PathBasedCacheDirective directive: directives) { results.add(addDirective(directive, pc)); } return results; } - private synchronized Fallible removeEntry(long entryId, + private synchronized Fallible removeDescriptor(long id, FSPermissionChecker pc) { // Check for invalid IDs. - if (entryId <= 0) { - LOG.info("removeEntry " + entryId + ": invalid non-positive entry ID."); - return new Fallible(new InvalidIdException(entryId)); + if (id <= 0) { + LOG.info("removeDescriptor " + id + ": invalid non-positive " + + "descriptor ID."); + return new Fallible(new InvalidIdException(id)); } // Find the entry. - PathBasedCacheEntry existing = entriesById.get(entryId); + PathBasedCacheEntry existing = entriesById.get(id); if (existing == null) { - LOG.info("removeEntry " + entryId + ": entry not found."); - return new Fallible(new NoSuchIdException(entryId)); + LOG.info("removeDescriptor " + id + ": entry not found."); + return new Fallible(new NoSuchIdException(id)); } - CachePool pool = cachePools.get(existing.getDirective().getPool()); + CachePool pool = cachePools.get(existing.getDescriptor().getPool()); if (pool == null) { - LOG.info("removeEntry " + entryId + ": pool not found for directive " + - existing.getDirective()); + LOG.info("removeDescriptor " + id + ": pool not found for directive " + + existing.getDescriptor()); return new Fallible( - new UnexpectedRemovePathBasedCacheEntryException(entryId)); + new UnexpectedRemovePathBasedCacheDescriptorException(id)); } if ((pc != null) && (!pc.checkPermission(pool, FsAction.WRITE))) { - LOG.info("removeEntry " + entryId + ": write permission denied to " + + LOG.info("removeDescriptor " + id + ": write permission denied to " + "pool " + pool + " for entry " + existing); return new Fallible( - new RemovePermissionDeniedException(entryId)); + new RemovePermissionDeniedException(id)); } - // Remove the corresponding entry in entriesByDirective. - if (entriesByDirective.remove(existing.getDirective()) == null) { - LOG.warn("removeEntry " + entryId + ": failed to find existing entry " + - existing + " in entriesByDirective"); - return new Fallible( - new UnexpectedRemovePathBasedCacheEntryException(entryId)); - } // Remove the corresponding entry in entriesByPath. 
- String path = existing.getDirective().getPath(); + String path = existing.getDescriptor().getPath(); List entries = entriesByPath.get(path); if (entries == null || !entries.remove(existing)) { return new Fallible( - new UnexpectedRemovePathBasedCacheEntryException(entryId)); + new UnexpectedRemovePathBasedCacheDescriptorException(id)); } if (entries.size() == 0) { entriesByPath.remove(path); } - entriesById.remove(entryId); + entriesById.remove(id); // Set the path as uncached in the namesystem try { - INode node = dir.getINode(existing.getDirective().getPath()); - if (node.isFile()) { - namesystem.setCacheReplicationInt(existing.getDirective().getPath(), + INode node = dir.getINode(existing.getDescriptor().getPath()); + if (node != null && node.isFile()) { + namesystem.setCacheReplicationInt(existing.getDescriptor().getPath(), (short) 0); } } catch (IOException e) { - LOG.warn("removeEntry " + entryId + ": failure while setting cache" + LOG.warn("removeDescriptor " + id + ": failure while setting cache" + " replication factor", e); return new Fallible(e); } - LOG.info("removeEntry successful for PathCacheEntry id " + entryId); - return new Fallible(entryId); + LOG.info("removeDescriptor successful for PathCacheEntry id " + id); + return new Fallible(id); } - public synchronized List> removeEntries(List entryIds, + public synchronized List> removeDescriptors(List ids, FSPermissionChecker pc) { ArrayList> results = - new ArrayList>(entryIds.size()); - for (Long entryId : entryIds) { - results.add(removeEntry(entryId, pc)); + new ArrayList>(ids.size()); + for (Long id : ids) { + results.add(removeDescriptor(id, pc)); } return results; } - public synchronized BatchedListEntries - listPathBasedCacheEntries(long prevId, String filterPool, + public synchronized BatchedListEntries + listPathBasedCacheDescriptors(long prevId, String filterPool, String filterPath, FSPermissionChecker pc) throws IOException { final int NUM_PRE_ALLOCATED_ENTRIES = 16; if (filterPath != null) { @@ -289,16 +293,16 @@ public synchronized List> removeEntries(List entryIds, throw new IOException("invalid path name '" + filterPath + "'"); } } - ArrayList replies = - new ArrayList(NUM_PRE_ALLOCATED_ENTRIES); + ArrayList replies = + new ArrayList(NUM_PRE_ALLOCATED_ENTRIES); int numReplies = 0; SortedMap tailMap = entriesById.tailMap(prevId + 1); for (Entry cur : tailMap.entrySet()) { - if (numReplies >= maxListCacheDirectivesResponses) { - return new BatchedListEntries(replies, true); + if (numReplies >= maxListCacheDescriptorsResponses) { + return new BatchedListEntries(replies, true); } PathBasedCacheEntry curEntry = cur.getValue(); - PathBasedCacheDirective directive = cur.getValue().getDirective(); + PathBasedCacheDirective directive = cur.getValue().getDescriptor(); if (filterPool != null && !directive.getPool().equals(filterPool)) { continue; @@ -307,17 +311,12 @@ public synchronized List> removeEntries(List entryIds, !directive.getPath().equals(filterPath)) { continue; } - CachePool pool = cachePools.get(curEntry.getDirective().getPool()); - if (pool == null) { - LOG.error("invalid pool for PathBasedCacheEntry " + curEntry); - continue; - } - if (pc.checkPermission(pool, FsAction.READ)) { - replies.add(cur.getValue()); + if (pc.checkPermission(curEntry.getPool(), FsAction.READ)) { + replies.add(cur.getValue().getDescriptor()); numReplies++; } } - return new BatchedListEntries(replies, false); + return new BatchedListEntries(replies, false); } /** @@ -409,12 +408,12 @@ public synchronized void removeCachePool(String 
poolName) // Remove entries using this pool // TODO: could optimize this somewhat to avoid the need to iterate - // over all entries in entriesByDirective - Iterator> iter = - entriesByDirective.entrySet().iterator(); + // over all entries in entriesById + Iterator> iter = + entriesById.entrySet().iterator(); while (iter.hasNext()) { - Entry entry = iter.next(); - if (entry.getKey().getPool().equals(poolName)) { + Entry entry = iter.next(); + if (entry.getValue().getPool() == pool) { entriesById.remove(entry.getValue().getEntryId()); iter.remove(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 8d41ca58104..01ef3d8bfce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -144,7 +144,7 @@ import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -6801,17 +6801,17 @@ void removeSnapshottableDirs(List toRemove) { } @SuppressWarnings("unchecked") - List> addPathBasedCacheDirectives( + List> addPathBasedCacheDirectives( List directives) throws IOException { CacheEntryWithPayload retryCacheEntry = RetryCache.waitForCompletion(retryCache, null); if (retryCacheEntry != null && retryCacheEntry.isSuccess()) { - return (List>) retryCacheEntry.getPayload(); + return (List>) retryCacheEntry.getPayload(); } final FSPermissionChecker pc = isPermissionEnabled ? getPermissionChecker() : null; boolean success = false; - List> results = null; + List> results = null; checkOperation(OperationCategory.WRITE); writeLock(); try { @@ -6837,7 +6837,7 @@ List> addPathBasedCacheDirectives( } @SuppressWarnings("unchecked") - List> removePathBasedCacheEntries(List ids) throws IOException { + List> removePathBasedCacheDescriptors(List ids) throws IOException { CacheEntryWithPayload retryCacheEntry = RetryCache.waitForCompletion(retryCache, null); if (retryCacheEntry != null && retryCacheEntry.isSuccess()) { @@ -6855,13 +6855,13 @@ List> removePathBasedCacheEntries(List ids) throws IOExcept throw new SafeModeException( "Cannot remove PathBasedCache directives", safeMode); } - results = cacheManager.removeEntries(ids, pc); + results = cacheManager.removeDescriptors(ids, pc); //getEditLog().logRemovePathBasedCacheEntries(results); FIXME: HDFS-5119 success = true; } finally { writeUnlock(); if (isAuditEnabled() && isExternalInvocation()) { - logAuditEvent(success, "removePathBasedCacheEntries", null, null, null); + logAuditEvent(success, "removePathBasedCacheDescriptors", null, null, null); } RetryCache.setState(retryCacheEntry, success, results); } @@ -6869,22 +6869,22 @@ List> removePathBasedCacheEntries(List ids) throws IOExcept return results; } - BatchedListEntries listPathBasedCacheEntries(long startId, + BatchedListEntries listPathBasedCacheDescriptors(long startId, String pool, String path) throws IOException { final FSPermissionChecker pc = isPermissionEnabled ? 
getPermissionChecker() : null; - BatchedListEntries results; + BatchedListEntries results; checkOperation(OperationCategory.READ); readLock(); boolean success = false; try { checkOperation(OperationCategory.READ); - results = cacheManager.listPathBasedCacheEntries(startId, pool, path, pc); + results = cacheManager.listPathBasedCacheDescriptors(startId, pool, path, pc); success = true; } finally { readUnlock(); if (isAuditEnabled() && isExternalInvocation()) { - logAuditEvent(success, "listPathBasedCacheEntries", null, null, null); + logAuditEvent(success, "listPathBasedCacheDescriptors", null, null, null); } } return results; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 33f7815c7fb..adfda61fa84 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -61,7 +61,7 @@ import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DatanodeID; @@ -1213,19 +1213,19 @@ public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, } @Override - public List> addPathBasedCacheDirectives( + public List> addPathBasedCacheDirectives( List paths) throws IOException { return namesystem.addPathBasedCacheDirectives(paths); } @Override - public List> removePathBasedCacheEntries(List ids) + public List> removePathBasedCacheDescriptors(List ids) throws IOException { - return namesystem.removePathBasedCacheEntries(ids); + return namesystem.removePathBasedCacheDescriptors(ids); } private class ServerSidePathBasedCacheEntriesIterator - extends BatchedRemoteIterator { + extends BatchedRemoteIterator { private final String pool; @@ -1239,19 +1239,19 @@ public ServerSidePathBasedCacheEntriesIterator(Long firstKey, String pool, } @Override - public BatchedEntries makeRequest( + public BatchedEntries makeRequest( Long nextKey) throws IOException { - return namesystem.listPathBasedCacheEntries(nextKey, pool, path); + return namesystem.listPathBasedCacheDescriptors(nextKey, pool, path); } @Override - public Long elementToPrevKey(PathBasedCacheEntry entry) { + public Long elementToPrevKey(PathBasedCacheDescriptor entry) { return entry.getEntryId(); } } @Override - public RemoteIterator listPathBasedCacheEntries(long prevId, + public RemoteIterator listPathBasedCacheDescriptors(long prevId, String pool, String path) throws IOException { return new ServerSidePathBasedCacheEntriesIterator(prevId, pool, path); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java index a989f5ff76a..bc9c77957c2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java @@ -28,7 +28,7 @@ 
import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; import org.apache.hadoop.hdfs.tools.TableListing.Justification; import org.apache.hadoop.util.Fallible; import org.apache.hadoop.util.StringUtils; @@ -100,10 +100,10 @@ public int run(List args) throws IOException { new LinkedList(); PathBasedCacheDirective directive = new PathBasedCacheDirective(path, poolName); directives.add(directive); - List> results = + List> results = dfs.addPathBasedCacheDirective(directives); try { - PathBasedCacheEntry entry = results.get(0).get(); + PathBasedCacheDescriptor entry = results.get(0).get(); System.out.println("Added PathBasedCache entry " + entry.getEntryId()); return 0; } catch (IOException e) { @@ -155,7 +155,7 @@ public int run(List args) throws IOException { DistributedFileSystem dfs = getDFS(); List ids = new LinkedList(); ids.add(id); - List> results = dfs.removePathBasedCacheEntries(ids); + List> results = dfs.removePathBasedCacheDescriptors(ids); try { Long resultId = results.get(0).get(); System.out.println("Removed PathBasedCache entry " + resultId); @@ -208,15 +208,13 @@ public int run(List args) throws IOException { addField("PATH", Justification.LEFT). build(); DistributedFileSystem dfs = getDFS(); - RemoteIterator iter = - dfs.listPathBasedCacheEntries(poolFilter, pathFilter); + RemoteIterator iter = + dfs.listPathBasedCacheDescriptors(poolFilter, pathFilter); int numEntries = 0; while (iter.hasNext()) { - PathBasedCacheEntry entry = iter.next(); + PathBasedCacheDescriptor entry = iter.next(); String row[] = new String[] { - "" + entry.getEntryId(), - entry.getDirective().getPool(), - entry.getDirective().getPath(), + "" + entry.getEntryId(), entry.getPool(), entry.getPath(), }; tableListing.addRow(row); numEntries++; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index f2f4c5e7b96..401aca7638b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -382,37 +382,38 @@ enum AddPathBasedCacheDirectiveErrorProto { INVALID_PATH_NAME_ERROR = -3; INVALID_POOL_NAME_ERROR = -4; ADD_PERMISSION_DENIED_ERROR = -5; + PATH_ALREADY_EXISTS_IN_POOL_ERROR = -6; } -message RemovePathBasedCacheEntriesRequestProto { +message RemovePathBasedCacheDescriptorsRequestProto { repeated int64 elements = 1 [packed=true]; } -message RemovePathBasedCacheEntriesResponseProto { +message RemovePathBasedCacheDescriptorsResponseProto { repeated int64 results = 1 [packed=true]; } -enum RemovePathBasedCacheEntryErrorProto { +enum RemovePathBasedCacheDescriptorErrorProto { UNEXPECTED_REMOVE_ERROR = -1; INVALID_CACHED_PATH_ID_ERROR = -2; NO_SUCH_CACHED_PATH_ID_ERROR = -3; REMOVE_PERMISSION_DENIED_ERROR = -4; } -message ListPathBasedCacheEntriesRequestProto { +message ListPathBasedCacheDescriptorsRequestProto { required int64 prevId = 1; optional string pool = 2; optional string path = 3; } -message ListPathBasedCacheEntriesElementProto { +message ListPathBasedCacheDescriptorsElementProto { required int64 id = 1; required string pool = 2; required string path = 3; } -message ListPathBasedCacheEntriesResponseProto { - repeated 
ListPathBasedCacheEntriesElementProto elements = 1; +message ListPathBasedCacheDescriptorsResponseProto { + repeated ListPathBasedCacheDescriptorsElementProto elements = 1; required bool hasMore = 2; } @@ -645,10 +646,10 @@ service ClientNamenodeProtocol { rpc getFileInfo(GetFileInfoRequestProto) returns(GetFileInfoResponseProto); rpc addPathBasedCacheDirectives(AddPathBasedCacheDirectivesRequestProto) returns (AddPathBasedCacheDirectivesResponseProto); - rpc removePathBasedCacheEntries(RemovePathBasedCacheEntriesRequestProto) - returns (RemovePathBasedCacheEntriesResponseProto); - rpc listPathBasedCacheEntries(ListPathBasedCacheEntriesRequestProto) - returns (ListPathBasedCacheEntriesResponseProto); + rpc removePathBasedCacheDescriptors(RemovePathBasedCacheDescriptorsRequestProto) + returns (RemovePathBasedCacheDescriptorsResponseProto); + rpc listPathBasedCacheDescriptors(ListPathBasedCacheDescriptorsRequestProto) + returns (ListPathBasedCacheDescriptorsResponseProto); rpc addCachePool(AddCachePoolRequestProto) returns(AddCachePoolResponseProto); rpc modifyCachePool(ModifyCachePoolRequestProto) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java index 8c7037c1e6b..45417150229 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.io.nativeio.NativeIO; @@ -151,12 +151,11 @@ public void testCachePaths() throws Exception { List toAdd = new ArrayList(); toAdd.add(new PathBasedCacheDirective(paths.get(i), pool)); - List> fallibles = + List> fallibles = nnRpc.addPathBasedCacheDirectives(toAdd); assertEquals("Unexpected number of fallibles", 1, fallibles.size()); - PathBasedCacheEntry entry = fallibles.get(0).get(); - PathBasedCacheDirective directive = entry.getDirective(); + PathBasedCacheDescriptor directive = fallibles.get(0).get(); assertEquals("Directive does not match requested path", paths.get(i), directive.getPath()); assertEquals("Directive does not match requested pool", pool, @@ -165,13 +164,13 @@ public void testCachePaths() throws Exception { waitForExpectedNumCachedBlocks(expected); } // Uncache and check each path in sequence - RemoteIterator entries = - nnRpc.listPathBasedCacheEntries(0, null, null); + RemoteIterator entries = + nnRpc.listPathBasedCacheDescriptors(0, null, null); for (int i=0; i toRemove = new ArrayList(); toRemove.add(entry.getEntryId()); - List> fallibles = nnRpc.removePathBasedCacheEntries(toRemove); + List> fallibles = nnRpc.removePathBasedCacheDescriptors(toRemove); assertEquals("Unexpected number of fallibles", 1, fallibles.size()); Long l = fallibles.get(0).get(); assertEquals("Removed entryId does not match requested", diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java index bf06eeb3ba6..d4eb57885d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java @@ -39,10 +39,10 @@ import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPathNameError; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PoolWritePermissionDeniedError; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.InvalidIdException; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.InvalidIdException; import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheEntryException.NoSuchIdException; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.NoSuchIdException; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; @@ -94,7 +94,7 @@ public void testCreateAndModifyPools() throws Exception { MiniDFSCluster cluster = null; // set low limits here for testing purposes conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, 2); - conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, 2); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES, 2); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); NamenodeProtocols proto = cluster.getNameNodeRpc(); @@ -132,17 +132,14 @@ public void testCreateAndModifyPools() throws Exception { } private static void validateListAll( - RemoteIterator iter, + RemoteIterator iter, long id0, long id1, long id2) throws Exception { - Assert.assertEquals(new PathBasedCacheEntry(id0, - new PathBasedCacheDirective("/alpha", "pool1")), - iter.next()); - Assert.assertEquals(new PathBasedCacheEntry(id1, - new PathBasedCacheDirective("/beta", "pool2")), - iter.next()); - Assert.assertEquals(new PathBasedCacheEntry(id2, - new PathBasedCacheDirective("/gamma", "pool1")), - iter.next()); + Assert.assertEquals(new PathBasedCacheDescriptor(id0, + "/alpha", "pool1"), iter.next()); + Assert.assertEquals(new PathBasedCacheDescriptor(id1, + "/beta", "pool2"), iter.next()); + Assert.assertEquals(new PathBasedCacheDescriptor(id2, + "/gamma", "pool1"), iter.next()); Assert.assertFalse(iter.hasNext()); } @@ -164,11 +161,11 @@ public void testSetAndGet() throws Exception { proto.addCachePool(new CachePoolInfo("pool4"). 
setMode(new FsPermission((short)0))); - List> addResults1 = + List> addResults1 = unprivilegedUser.doAs(new PrivilegedExceptionAction< - List>>() { + List>>() { @Override - public List> run() throws IOException { + public List> run() throws IOException { return proto.addPathBasedCacheDirectives(Arrays.asList( new PathBasedCacheDirective[] { new PathBasedCacheDirective("/alpha", "pool1"), @@ -212,7 +209,7 @@ public List> run() throws IOException { //instanceof PoolWritePermissionDeniedError); } - List> addResults2 = + List> addResults2 = proto.addPathBasedCacheDirectives(Arrays.asList( new PathBasedCacheDirective[] { new PathBasedCacheDirective("/alpha", "pool1"), @@ -240,20 +237,20 @@ public List> run() throws IOException { long ids2[] = new long[1]; ids2[0] = addResults2.get(3).get().getEntryId(); - RemoteIterator iter = - proto.listPathBasedCacheEntries(0, null, null); + RemoteIterator iter = + proto.listPathBasedCacheDescriptors(0, null, null); validateListAll(iter, ids1[0], ids1[1], ids2[0]); - iter = proto.listPathBasedCacheEntries(0, null, null); + iter = proto.listPathBasedCacheDescriptors(0, null, null); validateListAll(iter, ids1[0], ids1[1], ids2[0]); - iter = proto.listPathBasedCacheEntries(0, "pool3", null); + iter = proto.listPathBasedCacheDescriptors(0, "pool3", null); Assert.assertFalse(iter.hasNext()); - iter = proto.listPathBasedCacheEntries(0, "pool2", null); + iter = proto.listPathBasedCacheDescriptors(0, "pool2", null); Assert.assertEquals(addResults1.get(1).get(), iter.next()); Assert.assertFalse(iter.hasNext()); List> removeResults1 = - proto.removePathBasedCacheEntries(Arrays.asList( + proto.removePathBasedCacheDescriptors(Arrays.asList( new Long[] { ids1[1], -42L, 999999L })); Assert.assertEquals(Long.valueOf(ids1[1]), removeResults1.get(0).get()); @@ -269,7 +266,7 @@ public List> run() throws IOException { } catch (IOException ioe) { Assert.assertTrue(ioe.getCause() instanceof NoSuchIdException); } - iter = proto.listPathBasedCacheEntries(0, "pool2", null); + iter = proto.listPathBasedCacheDescriptors(0, "pool2", null); Assert.assertFalse(iter.hasNext()); } finally { if (cluster != null) { cluster.shutdown(); } From a0d9a155a4a4258f628e927e096ecf6673f788ec Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Sat, 21 Sep 2013 00:20:36 +0000 Subject: [PATCH 23/51] HDFS-5236. Change PathBasedCacheDirective APIs to be a single value rather than batch. 
(Contributed by Andrew Wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1525183 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 + .../org/apache/hadoop/hdfs/DFSClient.java | 70 +++ .../hadoop/hdfs/DistributedFileSystem.java | 51 +- .../AddPathBasedCacheDirectiveException.java | 54 +- .../hadoop/hdfs/protocol/CachePoolInfo.java | 18 + .../hadoop/hdfs/protocol/ClientProtocol.java | 32 +- ...movePathBasedCacheDescriptorException.java | 40 +- ...amenodeProtocolServerSideTranslatorPB.java | 100 +--- .../ClientNamenodeProtocolTranslatorPB.java | 140 +---- .../hdfs/server/namenode/CacheManager.java | 74 +-- .../hdfs/server/namenode/CachePool.java | 11 +- .../hdfs/server/namenode/FSNamesystem.java | 48 +- .../server/namenode/NameNodeRpcServer.java | 12 +- .../apache/hadoop/hdfs/tools/CacheAdmin.java | 43 +- .../main/proto/ClientNamenodeProtocol.proto | 39 +- .../TestCacheReplicationManager.java | 29 +- .../namenode/TestPathBasedCacheRequests.java | 526 +++++++++++------- 17 files changed, 652 insertions(+), 638 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index a0d03857f2e..9e936943013 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -42,6 +42,9 @@ HDFS-4949 (Unreleased) HDFS-5213. Separate PathBasedCacheEntry and PathBasedCacheDirectiveWithId. (Contributed by Colin Patrick McCabe) + HDFS-5236. Change PathBasedCacheDirective APIs to be a single value + rather than batch. (Contributed by Andrew Wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 02afb0c5d71..f032bd1bddf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -98,6 +98,7 @@ import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum; import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum; import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; @@ -107,6 +108,7 @@ import org.apache.hadoop.hdfs.client.ClientMmapManager; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; @@ -115,6 +117,8 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -2279,7 +2283,73 @@ public SnapshotDiffReport getSnapshotDiffReport(String snapshotDir, throw re.unwrapRemoteException(); } } + + public PathBasedCacheDescriptor 
addPathBasedCacheDirective( + PathBasedCacheDirective directive) throws IOException { + checkOpen(); + try { + return namenode.addPathBasedCacheDirective(directive); + } catch (RemoteException re) { + throw re.unwrapRemoteException(); + } + } + public void removePathBasedCacheDescriptor(PathBasedCacheDescriptor descriptor) + throws IOException { + checkOpen(); + try { + namenode.removePathBasedCacheDescriptor(descriptor.getEntryId()); + } catch (RemoteException re) { + throw re.unwrapRemoteException(); + } + } + + public RemoteIterator listPathBasedCacheDescriptors( + String pool, String path) throws IOException { + checkOpen(); + try { + return namenode.listPathBasedCacheDescriptors(0, pool, path); + } catch (RemoteException re) { + throw re.unwrapRemoteException(); + } + } + + public void addCachePool(CachePoolInfo info) throws IOException { + checkOpen(); + try { + namenode.addCachePool(info); + } catch (RemoteException re) { + throw re.unwrapRemoteException(); + } + } + + public void modifyCachePool(CachePoolInfo info) throws IOException { + checkOpen(); + try { + namenode.modifyCachePool(info); + } catch (RemoteException re) { + throw re.unwrapRemoteException(); + } + } + + public void removeCachePool(String poolName) throws IOException { + checkOpen(); + try { + namenode.removeCachePool(poolName); + } catch (RemoteException re) { + throw re.unwrapRemoteException(); + } + } + + public RemoteIterator listCachePools() throws IOException { + checkOpen(); + try { + return namenode.listCachePools(""); + } catch (RemoteException re) { + throw re.unwrapRemoteException(); + } + } + /** * Save namespace image. * diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index b6b412a6101..11d5fb05bed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -79,7 +79,6 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.util.Fallible; import org.apache.hadoop.util.Progressable; import com.google.common.annotations.VisibleForTesting; @@ -1584,29 +1583,26 @@ public Boolean next(final FileSystem fs, final Path p) } /** - * Add some PathBasedCache directives. + * Add a new PathBasedCacheDirective. * - * @param directives A list of PathBasedCache directives to be added. - * @return A Fallible list, where each element is either a successfully addded - * PathBasedCache entry, or an IOException describing why the directive - * could not be added. + * @param directive A PathBasedCacheDirectives to add + * @return PathBasedCacheDescriptor associated with the added directive + * @throws IOException if the directive could not be added */ - public List> - addPathBasedCacheDirective(List directives) - throws IOException { - return dfs.namenode.addPathBasedCacheDirectives(directives); + public PathBasedCacheDescriptor addPathBasedCacheDirective( + PathBasedCacheDirective directive) throws IOException { + return dfs.addPathBasedCacheDirective(directive); } /** - * Remove some PathBasedCache entries. + * Remove a PathBasedCacheDescriptor. * - * @param ids A list of all the entry IDs to be removed. 
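// Editor's note, not part of the patch: with the single-value API introduced
// by this change, a client round-trip through DistributedFileSystem looks
// roughly like the sketch below.  "dfs" is assumed to be an already
// initialised DistributedFileSystem and "pool1" an existing cache pool;
// error handling is elided.

import java.io.IOException;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;

class CacheDirectiveRoundTripSketch {
  static void roundTrip(DistributedFileSystem dfs) throws IOException {
    // Ask the NameNode to cache /foo in pool1; the returned descriptor
    // carries the id the CacheManager assigned.
    PathBasedCacheDescriptor descriptor =
        dfs.addPathBasedCacheDirective(
            new PathBasedCacheDirective("/foo", "pool1"));

    // List the descriptors in pool1 (null means no path filter).
    RemoteIterator<PathBasedCacheDescriptor> it =
        dfs.listPathBasedCacheDescriptors("pool1", null);
    while (it.hasNext()) {
      System.out.println("cached: " + it.next().getEntryId());
    }

    // Undo the directive using the descriptor returned by the add call.
    dfs.removePathBasedCacheDescriptor(descriptor);
  }
}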
- * @return A Fallible list where each element is either a successfully removed - * ID, or an IOException describing why the ID could not be removed. + * @param descriptor PathBasedCacheDescriptor to remove + * @throws IOException if the descriptor could not be removed */ - public List> - removePathBasedCacheDescriptors(List ids) throws IOException { - return dfs.namenode.removePathBasedCacheDescriptors(ids); + public void removePathBasedCacheDescriptor(PathBasedCacheDescriptor descriptor) + throws IOException { + dfs.removePathBasedCacheDescriptor(descriptor); } /** @@ -1619,43 +1615,46 @@ public Boolean next(final FileSystem fs, final Path p) */ public RemoteIterator listPathBasedCacheDescriptors( String pool, String path) throws IOException { - return dfs.namenode.listPathBasedCacheDescriptors(0, pool, path); + return dfs.listPathBasedCacheDescriptors(pool, path); } /** * Add a cache pool. * - * @param req + * @param info * The request to add a cache pool. * @throws IOException * If the request could not be completed. */ public void addCachePool(CachePoolInfo info) throws IOException { - dfs.namenode.addCachePool(info); + CachePoolInfo.validate(info); + dfs.addCachePool(info); } /** * Modify an existing cache pool. * - * @param req + * @param info * The request to modify a cache pool. * @throws IOException * If the request could not be completed. */ public void modifyCachePool(CachePoolInfo info) throws IOException { - dfs.namenode.modifyCachePool(info); + CachePoolInfo.validate(info); + dfs.modifyCachePool(info); } /** * Remove a cache pool. * - * @param cachePoolName + * @param poolName * Name of the cache pool to remove. * @throws IOException * if the cache pool did not exist, or could not be removed. */ - public void removeCachePool(String name) throws IOException { - dfs.namenode.removeCachePool(name); + public void removeCachePool(String poolName) throws IOException { + CachePoolInfo.validateName(poolName); + dfs.removeCachePool(poolName); } /** @@ -1667,6 +1666,6 @@ public void removeCachePool(String name) throws IOException { * If there was an error listing cache pools. 
*/ public RemoteIterator listCachePools() throws IOException { - return dfs.namenode.listCachePools(""); + return dfs.listCachePools(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathBasedCacheDirectiveException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathBasedCacheDirectiveException.java index 2a95a81996e..c077f9c90be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathBasedCacheDirectiveException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathBasedCacheDirectiveException.java @@ -25,24 +25,20 @@ public abstract class AddPathBasedCacheDirectiveException extends IOException { private static final long serialVersionUID = 1L; - private final PathBasedCacheDirective directive; - - public AddPathBasedCacheDirectiveException(String description, - PathBasedCacheDirective directive) { + public AddPathBasedCacheDirectiveException(String description) { super(description); - this.directive = directive; - } - - public PathBasedCacheDirective getDirective() { - return directive; } public static final class EmptyPathError extends AddPathBasedCacheDirectiveException { private static final long serialVersionUID = 1L; + public EmptyPathError(String msg) { + super(msg); + } + public EmptyPathError(PathBasedCacheDirective directive) { - super("empty path in directive " + directive, directive); + this("empty path in directive " + directive); } } @@ -50,9 +46,12 @@ public static class InvalidPathNameError extends AddPathBasedCacheDirectiveException { private static final long serialVersionUID = 1L; + public InvalidPathNameError(String msg) { + super(msg); + } + public InvalidPathNameError(PathBasedCacheDirective directive) { - super("can't handle non-absolute path name " + directive.getPath(), - directive); + this("can't handle invalid path name " + directive.getPath()); } } @@ -60,8 +59,12 @@ public static class InvalidPoolNameError extends AddPathBasedCacheDirectiveException { private static final long serialVersionUID = 1L; + public InvalidPoolNameError(String msg) { + super(msg); + } + public InvalidPoolNameError(PathBasedCacheDirective directive) { - super("invalid pool name '" + directive.getPool() + "'", directive); + this("invalid pool name '" + directive.getPool() + "'"); } } @@ -69,9 +72,12 @@ public static class PoolWritePermissionDeniedError extends AddPathBasedCacheDirectiveException { private static final long serialVersionUID = 1L; + public PoolWritePermissionDeniedError(String msg) { + super(msg); + } + public PoolWritePermissionDeniedError(PathBasedCacheDirective directive) { - super("write permission denied for pool '" + directive.getPool() + "'", - directive); + this("write permission denied for pool '" + directive.getPool() + "'"); } } @@ -79,9 +85,13 @@ public static class PathAlreadyExistsInPoolError extends AddPathBasedCacheDirectiveException { private static final long serialVersionUID = 1L; + public PathAlreadyExistsInPoolError(String msg) { + super(msg); + } + public PathAlreadyExistsInPoolError(PathBasedCacheDirective directive) { - super("path " + directive.getPath() + " already exists in pool " + - directive.getPool(), directive); + this("path " + directive.getPath() + " already exists in pool " + + directive.getPool()); } } @@ -89,10 +99,14 @@ public static class UnexpectedAddPathBasedCacheDirectiveException extends AddPathBasedCacheDirectiveException { private static final long 
serialVersionUID = 1L; + public UnexpectedAddPathBasedCacheDirectiveException(String msg) { + super(msg); + } + public UnexpectedAddPathBasedCacheDirectiveException( PathBasedCacheDirective directive) { - super("encountered an unexpected error when trying to " + - "add PathBasedCache directive " + directive, directive); + this("encountered an unexpected error when trying to " + + "add PathBasedCache directive " + directive); } } -}; +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java index 6641cd29004..c07274b35a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs.protocol; +import java.io.IOException; + import javax.annotation.Nullable; import org.apache.commons.lang.builder.EqualsBuilder; @@ -127,4 +129,20 @@ public int hashCode() { append(weight). hashCode(); } + + public static void validate(CachePoolInfo info) throws IOException { + if (info == null) { + throw new IOException("CachePoolInfo is null"); + } + validateName(info.poolName); + } + + public static void validateName(String poolName) throws IOException { + if (poolName == null || poolName.isEmpty()) { + // Empty pool names are not allowed because they would be highly + // confusing. They would also break the ability to list all pools + // by starting with prevKey = "" + throw new IOException("invalid empty cache pool name"); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index d33e7f94e1e..f7d2272f008 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -19,7 +19,6 @@ import java.io.FileNotFoundException; import java.io.IOException; -import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -48,7 +47,6 @@ import org.apache.hadoop.security.KerberosInfo; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenInfo; -import org.apache.hadoop.util.Fallible; /********************************************************************** * ClientProtocol is used by user code via @@ -1098,28 +1096,24 @@ public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, String fromSnapshot, String toSnapshot) throws IOException; /** - * Add some PathBasedCache directives to the CacheManager. + * Add a PathBasedCache entry to the CacheManager. * - * @param directives A list of PathBasedCache directives to be added. - * @return A Fallible list, where each element is either a successfully addded - * PathBasedCache entry, or an IOException describing why the directive - * could not be added. 
+ * @param directive A PathBasedCacheDirective to be added + * @return A PathBasedCacheDescriptor associated with the added directive + * @throws IOException if the directive could not be added */ @AtMostOnce - public List> - addPathBasedCacheDirectives(List directives) - throws IOException; + public PathBasedCacheDescriptor addPathBasedCacheDirective( + PathBasedCacheDirective directive) throws IOException; /** - * Remove some PathBasedCache entries from the CacheManager. + * Remove a PathBasedCacheDescriptor from the CacheManager. * - * @param ids A list of all the entry IDs to be removed from the CacheManager. - * @return A Fallible list where each element is either a successfully removed - * ID, or an IOException describing why the ID could not be removed. + * @param id of a PathBasedCacheDescriptor + * @throws IOException if the cache descriptor could not be removed */ @AtMostOnce - public List> removePathBasedCacheDescriptors(List ids) - throws IOException; + public void removePathBasedCacheDescriptor(Long id) throws IOException; /** * List the set of cached paths of a cache pool. Incrementally fetches results @@ -1132,9 +1126,9 @@ public List> removePathBasedCacheDescriptors(List ids) * @return A RemoteIterator which returns PathBasedCacheDescriptor objects. */ @Idempotent - public RemoteIterator listPathBasedCacheDescriptors(long prevId, - String pool, String path) throws IOException; - + public RemoteIterator listPathBasedCacheDescriptors( + long prevId, String pool, String path) throws IOException; + /** * Add a new cache pool. * diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathBasedCacheDescriptorException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathBasedCacheDescriptorException.java index 8ef8c80a5af..7560062a927 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathBasedCacheDescriptorException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RemovePathBasedCacheDescriptorException.java @@ -19,31 +19,26 @@ import java.io.IOException; -import com.google.common.base.Preconditions; - /** * An exception which occurred when trying to remove a PathBasedCache entry. 
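// Editor's note, not part of the patch: every exception class in this file
// now carries only a message.  A plausible reason, consistent with
// DFSClient's use of re.unwrapRemoteException(), is that RemoteException
// recreates the original exception type on the client from its class name
// and message, which requires a String-only constructor.  Assuming the new
// single-value removePathBasedCacheDescriptor surfaces NoSuchIdException
// this way when the id is unknown (the server-side change is not shown in
// this excerpt), a caller could react to it as sketched here:

import java.io.IOException;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;
import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.NoSuchIdException;

class RemoveDescriptorSketch {
  // Returns true if the descriptor was removed, false if it was already gone.
  static boolean removeQuietly(DistributedFileSystem dfs,
      PathBasedCacheDescriptor descriptor) throws IOException {
    try {
      dfs.removePathBasedCacheDescriptor(descriptor);
      return true;
    } catch (NoSuchIdException e) {
      return false;
    }
  }
}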
*/ public abstract class RemovePathBasedCacheDescriptorException extends IOException { private static final long serialVersionUID = 1L; - private final long entryId; - - public RemovePathBasedCacheDescriptorException(String description, long entryId) { + public RemovePathBasedCacheDescriptorException(String description) { super(description); - this.entryId = entryId; - } - - public long getEntryId() { - return this.entryId; } public final static class InvalidIdException extends RemovePathBasedCacheDescriptorException { private static final long serialVersionUID = 1L; + public InvalidIdException(String msg) { + super(msg); + } + public InvalidIdException(long entryId) { - super("invalid PathBasedCacheDescriptor id " + entryId, entryId); + this("invalid PathBasedCacheDescriptor id " + entryId); } } @@ -51,9 +46,13 @@ public final static class RemovePermissionDeniedException extends RemovePathBasedCacheDescriptorException { private static final long serialVersionUID = 1L; + public RemovePermissionDeniedException(String msg) { + super(msg); + } + public RemovePermissionDeniedException(long entryId) { - super("permission denied when trying to remove " + - "PathBasedCacheDescriptor id " + entryId, entryId); + this("permission denied when trying to remove " + + "PathBasedCacheDescriptor id " + entryId); } } @@ -61,9 +60,12 @@ public final static class NoSuchIdException extends RemovePathBasedCacheDescriptorException { private static final long serialVersionUID = 1L; + public NoSuchIdException(String msg) { + super(msg); + } + public NoSuchIdException(long entryId) { - super("there is no PathBasedCacheDescriptor with id " + entryId, - entryId); + this("there is no PathBasedCacheDescriptor with id " + entryId); } } @@ -71,9 +73,13 @@ public final static class UnexpectedRemovePathBasedCacheDescriptorException extends RemovePathBasedCacheDescriptorException { private static final long serialVersionUID = 1L; + public UnexpectedRemovePathBasedCacheDescriptorException(String msg) { + super(msg); + } + public UnexpectedRemovePathBasedCacheDescriptorException(long id) { - super("encountered an unexpected error when trying to " + - "remove PathBasedCacheDescriptor with id " + id, id); + this("encountered an unexpected error when trying to " + + "remove PathBasedCacheDescriptor with id " + id); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 48ebd9095e6..272286572a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.protocolPB; import java.io.IOException; -import java.util.ArrayList; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; @@ -28,11 +27,6 @@ import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.EmptyPathError; -import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPathNameError; -import 
org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError; -import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PoolWritePermissionDeniedError; -import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PathAlreadyExistsInPoolError; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; @@ -40,8 +34,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.InvalidIdException; import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.NoSuchIdException; import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.RemovePermissionDeniedException; @@ -53,9 +47,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectiveErrorProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectivesRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectivesResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectiveRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectiveResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto; @@ -83,8 +76,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.Builder; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.Builder; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto; @@ -132,9 +125,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesResponseProto; import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDescriptorsRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDescriptorsResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDescriptorErrorProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDescriptorRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDescriptorResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2ResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto; @@ -174,7 +166,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; -import org.apache.hadoop.hdfs.server.namenode.CachePool; import org.apache.hadoop.hdfs.server.namenode.INodeId; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; @@ -184,7 +175,6 @@ import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto; import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.util.Fallible; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; @@ -1040,44 +1030,18 @@ public IsFileClosedResponseProto isFileClosed( } @Override - public AddPathBasedCacheDirectivesResponseProto addPathBasedCacheDirectives(RpcController controller, - AddPathBasedCacheDirectivesRequestProto request) throws ServiceException { + public AddPathBasedCacheDirectiveResponseProto addPathBasedCacheDirective( + RpcController controller, AddPathBasedCacheDirectiveRequestProto request) + throws ServiceException { try { - ArrayList input = - new ArrayList(request.getElementsCount()); - for (int i = 0; i < request.getElementsCount(); i++) { - PathBasedCacheDirectiveProto proto = request.getElements(i); - input.add(new PathBasedCacheDirective(proto.getPath(), proto.getPool())); - } - List> output = server.addPathBasedCacheDirectives(input); - AddPathBasedCacheDirectivesResponseProto.Builder builder = - AddPathBasedCacheDirectivesResponseProto.newBuilder(); - for (int idx = 0; idx < output.size(); idx++) { - try { - PathBasedCacheDescriptor directive = output.get(idx).get(); - builder.addResults(directive.getEntryId()); - } catch (IOException ioe) { - if (ioe.getCause() instanceof EmptyPathError) { - builder.addResults(AddPathBasedCacheDirectiveErrorProto. - EMPTY_PATH_ERROR_VALUE); - } else if (ioe.getCause() instanceof InvalidPathNameError) { - builder.addResults(AddPathBasedCacheDirectiveErrorProto. - INVALID_PATH_NAME_ERROR_VALUE); - } else if (ioe.getCause() instanceof InvalidPoolNameError) { - builder.addResults(AddPathBasedCacheDirectiveErrorProto. 
- INVALID_POOL_NAME_ERROR_VALUE); - } else if (ioe.getCause() instanceof PoolWritePermissionDeniedError) { - builder.addResults(AddPathBasedCacheDirectiveErrorProto. - ADD_PERMISSION_DENIED_ERROR_VALUE); - } else if (ioe.getCause() instanceof PathAlreadyExistsInPoolError) { - builder.addResults(AddPathBasedCacheDirectiveErrorProto. - PATH_ALREADY_EXISTS_IN_POOL_ERROR_VALUE); - } else { - builder.addResults(AddPathBasedCacheDirectiveErrorProto. - UNEXPECTED_ADD_ERROR_VALUE); - } - } - } + PathBasedCacheDirectiveProto proto = request.getDirective(); + PathBasedCacheDirective directive = + new PathBasedCacheDirective(proto.getPath(), proto.getPool()); + PathBasedCacheDescriptor descriptor = + server.addPathBasedCacheDirective(directive); + AddPathBasedCacheDirectiveResponseProto.Builder builder = + AddPathBasedCacheDirectiveResponseProto.newBuilder(); + builder.setDescriptorId(descriptor.getEntryId()); return builder.build(); } catch (IOException e) { throw new ServiceException(e); @@ -1085,32 +1049,14 @@ public AddPathBasedCacheDirectivesResponseProto addPathBasedCacheDirectives(RpcC } @Override - public RemovePathBasedCacheDescriptorsResponseProto removePathBasedCacheDescriptors( - RpcController controller, RemovePathBasedCacheDescriptorsRequestProto request) + public RemovePathBasedCacheDescriptorResponseProto removePathBasedCacheDescriptor( + RpcController controller, + RemovePathBasedCacheDescriptorRequestProto request) throws ServiceException { try { - List> output = - server.removePathBasedCacheDescriptors(request.getElementsList()); - RemovePathBasedCacheDescriptorsResponseProto.Builder builder = - RemovePathBasedCacheDescriptorsResponseProto.newBuilder(); - for (int idx = 0; idx < output.size(); idx++) { - try { - long id = output.get(idx).get(); - builder.addResults(id); - } catch (InvalidIdException ioe) { - builder.addResults(RemovePathBasedCacheDescriptorErrorProto. - INVALID_CACHED_PATH_ID_ERROR_VALUE); - } catch (NoSuchIdException ioe) { - builder.addResults(RemovePathBasedCacheDescriptorErrorProto. - NO_SUCH_CACHED_PATH_ID_ERROR_VALUE); - } catch (RemovePermissionDeniedException ioe) { - builder.addResults(RemovePathBasedCacheDescriptorErrorProto. - REMOVE_PERMISSION_DENIED_ERROR_VALUE); - } catch (IOException ioe) { - builder.addResults(RemovePathBasedCacheDescriptorErrorProto. 
- UNEXPECTED_REMOVE_ERROR_VALUE); - } - } + server.removePathBasedCacheDescriptor(request.getDescriptorId()); + RemovePathBasedCacheDescriptorResponseProto.Builder builder = + RemovePathBasedCacheDescriptorResponseProto.newBuilder(); return builder.build(); } catch (IOException e) { throw new ServiceException(e); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index b389b026027..6c84c0460ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -20,9 +20,7 @@ import java.io.Closeable; import java.io.FileNotFoundException; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; -import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -38,18 +36,7 @@ import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; -import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.EmptyPathError; -import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPathNameError; -import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError; -import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PoolWritePermissionDeniedError; -import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PathAlreadyExistsInPoolError; -import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.UnexpectedAddPathBasedCacheDirectiveException; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.InvalidIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.NoSuchIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.RemovePermissionDeniedException; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.UnexpectedRemovePathBasedCacheDescriptorException; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException; @@ -62,17 +49,16 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto; 
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathBasedCacheDirectiveProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectiveErrorProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectivesRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectivesResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectiveRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectiveResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto; @@ -110,23 +96,21 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsElementProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsElementProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathBasedCacheDescriptorsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathBasedCacheDirectiveProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDescriptorsRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDescriptorsResponseProto; -import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDescriptorErrorProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathBasedCacheDescriptorRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto; @@ -147,7 +131,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; -import org.apache.hadoop.hdfs.server.namenode.CacheManager; import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; import org.apache.hadoop.hdfs.server.namenode.SafeModeException; import org.apache.hadoop.io.EnumSetWritable; @@ -163,7 +146,6 @@ import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto; import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.util.Fallible; import com.google.protobuf.ByteString; import com.google.protobuf.ServiceException; @@ -1020,101 +1002,33 @@ public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, } } - private static IOException addPathBasedCacheDirectivesError(long code, - PathBasedCacheDirective directive) { - if (code == AddPathBasedCacheDirectiveErrorProto.EMPTY_PATH_ERROR_VALUE) { - return new EmptyPathError(directive); - } else if (code == AddPathBasedCacheDirectiveErrorProto. - INVALID_PATH_NAME_ERROR_VALUE) { - return new InvalidPathNameError(directive); - } else if (code == AddPathBasedCacheDirectiveErrorProto. - INVALID_POOL_NAME_ERROR_VALUE) { - return new InvalidPoolNameError(directive); - } else if (code == AddPathBasedCacheDirectiveErrorProto. - ADD_PERMISSION_DENIED_ERROR_VALUE) { - return new PoolWritePermissionDeniedError(directive); - } else if (code == AddPathBasedCacheDirectiveErrorProto. - PATH_ALREADY_EXISTS_IN_POOL_ERROR_VALUE) { - return new PathAlreadyExistsInPoolError(directive); - } else { - return new UnexpectedAddPathBasedCacheDirectiveException(directive); - } - } - @Override - public List> addPathBasedCacheDirectives( - List directives) throws IOException { + public PathBasedCacheDescriptor addPathBasedCacheDirective( + PathBasedCacheDirective directive) throws IOException { try { - AddPathBasedCacheDirectivesRequestProto.Builder builder = - AddPathBasedCacheDirectivesRequestProto.newBuilder(); - for (PathBasedCacheDirective directive : directives) { - builder.addElements(PathBasedCacheDirectiveProto.newBuilder(). - setPath(directive.getPath()). - setPool(directive.getPool()). 
- build()); - } - AddPathBasedCacheDirectivesResponseProto result = - rpcProxy.addPathBasedCacheDirectives(null, builder.build()); - int resultsCount = result.getResultsCount(); - ArrayList> results = - new ArrayList>(resultsCount); - for (int i = 0; i < resultsCount; i++) { - PathBasedCacheDirective directive = directives.get(i); - long code = result.getResults(i); - if (code > 0) { - results.add(new Fallible( - new PathBasedCacheDescriptor(code, - directive.getPath(), directive.getPool()))); - } else { - results.add(new Fallible( - addPathBasedCacheDirectivesError(code, directive))); - } - } - return results; + AddPathBasedCacheDirectiveRequestProto.Builder builder = + AddPathBasedCacheDirectiveRequestProto.newBuilder(); + builder.setDirective(PathBasedCacheDirectiveProto.newBuilder() + .setPath(directive.getPath()) + .setPool(directive.getPool()) + .build()); + AddPathBasedCacheDirectiveResponseProto result = + rpcProxy.addPathBasedCacheDirective(null, builder.build()); + return new PathBasedCacheDescriptor(result.getDescriptorId(), + directive.getPath(), directive.getPool()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } - - private static IOException removePathBasedCacheDescriptorsError(long code, long id) { - if (code == RemovePathBasedCacheDescriptorErrorProto. - INVALID_CACHED_PATH_ID_ERROR_VALUE) { - return new InvalidIdException(id); - } else if (code == RemovePathBasedCacheDescriptorErrorProto. - NO_SUCH_CACHED_PATH_ID_ERROR_VALUE) { - return new NoSuchIdException(id); - } else if (code == RemovePathBasedCacheDescriptorErrorProto. - REMOVE_PERMISSION_DENIED_ERROR_VALUE) { - return new RemovePermissionDeniedException(id); - } else { - return new UnexpectedRemovePathBasedCacheDescriptorException(id); - } - } @Override - public List> removePathBasedCacheDescriptors(List ids) + public void removePathBasedCacheDescriptor(Long id) throws IOException { try { - RemovePathBasedCacheDescriptorsRequestProto.Builder builder = - RemovePathBasedCacheDescriptorsRequestProto.newBuilder(); - for (Long id : ids) { - builder.addElements(id); - } - RemovePathBasedCacheDescriptorsResponseProto result = - rpcProxy.removePathBasedCacheDescriptors(null, builder.build()); - int resultsCount = result.getResultsCount(); - ArrayList> results = - new ArrayList>(resultsCount); - for (int i = 0; i < resultsCount; i++) { - long code = result.getResults(i); - if (code > 0) { - results.add(new Fallible(code)); - } else { - results.add(new Fallible( - removePathBasedCacheDescriptorsError(code, ids.get(i)))); - } - } - return results; + RemovePathBasedCacheDescriptorRequestProto.Builder builder = + RemovePathBasedCacheDescriptorRequestProto.newBuilder(); + builder.setDescriptorId(id); + rpcProxy.removePathBasedCacheDescriptor(null, builder.build()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index 943ed507982..ad24227aa06 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -47,7 +47,6 @@ import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.NoSuchIdException; import 
org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.UnexpectedRemovePathBasedCacheDescriptorException; import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.RemovePermissionDeniedException; -import org.apache.hadoop.util.Fallible; /** * The Cache Manager handles caching on DataNodes. @@ -136,25 +135,24 @@ synchronized long getNextEntryId() throws IOException { return null; } - private synchronized Fallible addDirective( - PathBasedCacheDirective directive, FSPermissionChecker pc) { + public synchronized PathBasedCacheDescriptor addDirective( + PathBasedCacheDirective directive, FSPermissionChecker pc) + throws IOException { CachePool pool = cachePools.get(directive.getPool()); if (pool == null) { LOG.info("addDirective " + directive + ": pool not found."); - return new Fallible( - new InvalidPoolNameError(directive)); + throw new InvalidPoolNameError(directive); } if ((pc != null) && (!pc.checkPermission(pool, FsAction.WRITE))) { LOG.info("addDirective " + directive + ": write permission denied."); - return new Fallible( - new PoolWritePermissionDeniedError(directive)); + throw new PoolWritePermissionDeniedError(directive); } try { directive.validate(); } catch (IOException ioe) { LOG.info("addDirective " + directive + ": validation failed: " + ioe.getClass().getName() + ": " + ioe.getMessage()); - return new Fallible(ioe); + throw ioe; } // Check if we already have this entry. @@ -162,8 +160,7 @@ private synchronized Fallible addDirective( if (existing != null) { LOG.info("addDirective " + directive + ": there is an " + "existing directive " + existing + " in this pool."); - return new Fallible( - existing.getDescriptor()); + return existing.getDescriptor(); } // Add a new entry with the next available ID. PathBasedCacheEntry entry; @@ -171,8 +168,7 @@ private synchronized Fallible addDirective( entry = new PathBasedCacheEntry(getNextEntryId(), directive.getPath(), pool); } catch (IOException ioe) { - return new Fallible( - new UnexpectedAddPathBasedCacheDirectiveException(directive)); + throw new UnexpectedAddPathBasedCacheDirectiveException(directive); } LOG.info("addDirective " + directive + ": added cache directive " + directive); @@ -202,56 +198,42 @@ private synchronized Fallible addDirective( } catch (IOException ioe) { LOG.info("addDirective " + directive +": failed to cache file: " + ioe.getClass().getName() +": " + ioe.getMessage()); - return new Fallible(ioe); + throw ioe; } - return new Fallible( - entry.getDescriptor()); + return entry.getDescriptor(); } - public synchronized List> addDirectives( - List directives, FSPermissionChecker pc) { - ArrayList> results = - new ArrayList>(directives.size()); - for (PathBasedCacheDirective directive: directives) { - results.add(addDirective(directive, pc)); - } - return results; - } - - private synchronized Fallible removeDescriptor(long id, - FSPermissionChecker pc) { + public synchronized void removeDescriptor(long id, FSPermissionChecker pc) + throws IOException { // Check for invalid IDs. if (id <= 0) { LOG.info("removeDescriptor " + id + ": invalid non-positive " + "descriptor ID."); - return new Fallible(new InvalidIdException(id)); + throw new InvalidIdException(id); } // Find the entry. 
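For context, the same single-item convention applies inside CacheManager: addDirective and removeDescriptor now throw typed IOExceptions instead of returning Fallible results. A hedged sketch of the new calling convention (illustrative only; "cacheManager" and "pc" stand in for an initialized CacheManager and FSPermissionChecker, the path and pool names are made up, and the exception types are the ones imported at the top of CacheManager.java):

    // Illustrative sketch: callers use ordinary try/catch rather than
    // unwrapping Fallible results.
    try {
      PathBasedCacheDescriptor d = cacheManager.addDirective(
          new PathBasedCacheDirective("/warm/data", "pool1"), pc);
      cacheManager.removeDescriptor(d.getEntryId(), pc);
    } catch (InvalidPoolNameError e) {
      // the named pool does not exist
    } catch (NoSuchIdException e) {
      // no descriptor with this id
    } catch (IOException e) {
      // validation failure, permission denied, or other error
    }
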
PathBasedCacheEntry existing = entriesById.get(id); if (existing == null) { LOG.info("removeDescriptor " + id + ": entry not found."); - return new Fallible(new NoSuchIdException(id)); + throw new NoSuchIdException(id); } CachePool pool = cachePools.get(existing.getDescriptor().getPool()); if (pool == null) { LOG.info("removeDescriptor " + id + ": pool not found for directive " + existing.getDescriptor()); - return new Fallible( - new UnexpectedRemovePathBasedCacheDescriptorException(id)); + throw new UnexpectedRemovePathBasedCacheDescriptorException(id); } if ((pc != null) && (!pc.checkPermission(pool, FsAction.WRITE))) { LOG.info("removeDescriptor " + id + ": write permission denied to " + "pool " + pool + " for entry " + existing); - return new Fallible( - new RemovePermissionDeniedException(id)); + throw new RemovePermissionDeniedException(id); } // Remove the corresponding entry in entriesByPath. String path = existing.getDescriptor().getPath(); List entries = entriesByPath.get(path); if (entries == null || !entries.remove(existing)) { - return new Fallible( - new UnexpectedRemovePathBasedCacheDescriptorException(id)); + throw new UnexpectedRemovePathBasedCacheDescriptorException(id); } if (entries.size() == 0) { entriesByPath.remove(path); @@ -268,20 +250,9 @@ private synchronized Fallible removeDescriptor(long id, } catch (IOException e) { LOG.warn("removeDescriptor " + id + ": failure while setting cache" + " replication factor", e); - return new Fallible(e); + throw e; } LOG.info("removeDescriptor successful for PathCacheEntry id " + id); - return new Fallible(id); - } - - public synchronized List> removeDescriptors(List ids, - FSPermissionChecker pc) { - ArrayList> results = - new ArrayList>(ids.size()); - for (Long id : ids) { - results.add(removeDescriptor(id, pc)); - } - return results; } public synchronized BatchedListEntries @@ -329,8 +300,8 @@ public synchronized List> removeDescriptors(List ids, */ public synchronized void addCachePool(CachePoolInfo info) throws IOException { + CachePoolInfo.validate(info); String poolName = info.getPoolName(); - CachePool.validateName(poolName); CachePool pool = cachePools.get(poolName); if (pool != null) { throw new IOException("cache pool " + poolName + " already exists."); @@ -352,10 +323,8 @@ public synchronized void addCachePool(CachePoolInfo info) */ public synchronized void modifyCachePool(CachePoolInfo info) throws IOException { + CachePoolInfo.validate(info); String poolName = info.getPoolName(); - if (poolName.isEmpty()) { - throw new IOException("invalid empty cache pool name"); - } CachePool pool = cachePools.get(poolName); if (pool == null) { throw new IOException("cache pool " + poolName + " does not exist."); @@ -401,9 +370,10 @@ public synchronized void modifyCachePool(CachePoolInfo info) */ public synchronized void removeCachePool(String poolName) throws IOException { + CachePoolInfo.validateName(poolName); CachePool pool = cachePools.remove(poolName); if (pool == null) { - throw new IOException("can't remove nonexistent cache pool " + poolName); + throw new IOException("can't remove non-existent cache pool " + poolName); } // Remove entries using this pool diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java index d645c8270d3..b553154c7d9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java @@ -24,9 +24,9 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.security.UserGroupInformation; /** @@ -162,13 +162,4 @@ public String toString() { append(", weight:").append(weight). append(" }").toString(); } - - public static void validateName(String name) throws IOException { - if (name.isEmpty()) { - // Empty pool names are not allowed because they would be highly - // confusing. They would also break the ability to list all pools - // by starting with prevKey = "" - throw new IOException("invalid empty cache pool name"); - } - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index f659ae772b6..0299ee7a7b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -132,7 +132,6 @@ import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; @@ -232,7 +231,6 @@ import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.DataChecksum; -import org.apache.hadoop.util.Fallible; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.VersionInfo; @@ -6884,18 +6882,17 @@ void removeSnapshottableDirs(List toRemove) { } } - @SuppressWarnings("unchecked") - List> addPathBasedCacheDirectives( - List directives) throws IOException { + PathBasedCacheDescriptor addPathBasedCacheDirective( + PathBasedCacheDirective directive) throws IOException { CacheEntryWithPayload retryCacheEntry = RetryCache.waitForCompletion(retryCache, null); if (retryCacheEntry != null && retryCacheEntry.isSuccess()) { - return (List>) retryCacheEntry.getPayload(); + return (PathBasedCacheDescriptor) retryCacheEntry.getPayload(); } final FSPermissionChecker pc = isPermissionEnabled ? 
getPermissionChecker() : null; boolean success = false; - List> results = null; + PathBasedCacheDescriptor result = null; checkOperation(OperationCategory.WRITE); writeLock(); try { @@ -6904,8 +6901,8 @@ List> addPathBasedCacheDirectives( throw new SafeModeException( "Cannot add PathBasedCache directive", safeMode); } - results = cacheManager.addDirectives(directives, pc); - //getEditLog().logAddPathBasedCacheDirectives(results); FIXME: HDFS-5119 + result = cacheManager.addDirective(directive, pc); + //getEditLog().logAddPathBasedCacheDirective(result); FIXME: HDFS-5119 success = true; } finally { writeUnlock(); @@ -6913,24 +6910,21 @@ List> addPathBasedCacheDirectives( getEditLog().logSync(); } if (isAuditEnabled() && isExternalInvocation()) { - logAuditEvent(success, "addPathBasedCacheDirectives", null, null, null); + logAuditEvent(success, "addPathBasedCacheDirective", null, null, null); } - RetryCache.setState(retryCacheEntry, success, results); + RetryCache.setState(retryCacheEntry, success, result); } - return results; + return result; } - @SuppressWarnings("unchecked") - List> removePathBasedCacheDescriptors(List ids) throws IOException { - CacheEntryWithPayload retryCacheEntry = - RetryCache.waitForCompletion(retryCache, null); + void removePathBasedCacheDescriptor(Long id) throws IOException { + CacheEntry retryCacheEntry = RetryCache.waitForCompletion(retryCache); if (retryCacheEntry != null && retryCacheEntry.isSuccess()) { - return (List>) retryCacheEntry.getPayload(); + return; } final FSPermissionChecker pc = isPermissionEnabled ? getPermissionChecker() : null; boolean success = false; - List> results = null; checkOperation(OperationCategory.WRITE); writeLock(); try { @@ -6939,22 +6933,22 @@ List> removePathBasedCacheDescriptors(List ids) throws IOEx throw new SafeModeException( "Cannot remove PathBasedCache directives", safeMode); } - results = cacheManager.removeDescriptors(ids, pc); + cacheManager.removeDescriptor(id, pc); //getEditLog().logRemovePathBasedCacheEntries(results); FIXME: HDFS-5119 success = true; } finally { writeUnlock(); if (isAuditEnabled() && isExternalInvocation()) { - logAuditEvent(success, "removePathBasedCacheDescriptors", null, null, null); + logAuditEvent(success, "removePathBasedCacheDescriptors", null, null, + null); } - RetryCache.setState(retryCacheEntry, success, results); + RetryCache.setState(retryCacheEntry, success); } getEditLog().logSync(); - return results; } - BatchedListEntries listPathBasedCacheDescriptors(long startId, - String pool, String path) throws IOException { + BatchedListEntries listPathBasedCacheDescriptors( + long startId, String pool, String path) throws IOException { final FSPermissionChecker pc = isPermissionEnabled ? 
getPermissionChecker() : null; BatchedListEntries results; @@ -6963,12 +6957,14 @@ BatchedListEntries listPathBasedCacheDescriptors(long boolean success = false; try { checkOperation(OperationCategory.READ); - results = cacheManager.listPathBasedCacheDescriptors(startId, pool, path, pc); + results = + cacheManager.listPathBasedCacheDescriptors(startId, pool, path, pc); success = true; } finally { readUnlock(); if (isAuditEnabled() && isExternalInvocation()) { - logAuditEvent(success, "listPathBasedCacheDescriptors", null, null, null); + logAuditEvent(success, "listPathBasedCacheDescriptors", null, null, + null); } } return results; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index e8801649ba2..bbb67a3d978 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -139,7 +139,6 @@ import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetUserMappingsProtocolService; import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolPB; import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolServerSideTranslatorPB; -import org.apache.hadoop.util.Fallible; import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.util.VersionUtil; @@ -1238,15 +1237,14 @@ public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, } @Override - public List> addPathBasedCacheDirectives( - List paths) throws IOException { - return namesystem.addPathBasedCacheDirectives(paths); + public PathBasedCacheDescriptor addPathBasedCacheDirective( + PathBasedCacheDirective path) throws IOException { + return namesystem.addPathBasedCacheDirective(path); } @Override - public List> removePathBasedCacheDescriptors(List ids) - throws IOException { - return namesystem.removePathBasedCacheDescriptors(ids); + public void removePathBasedCacheDescriptor(Long id) throws IOException { + namesystem.removePathBasedCacheDescriptor(id); } private class ServerSidePathBasedCacheEntriesIterator diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java index bc9c77957c2..0ba9023d923 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java @@ -22,15 +22,13 @@ import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; import org.apache.hadoop.hdfs.tools.TableListing.Justification; -import org.apache.hadoop.util.Fallible; import org.apache.hadoop.util.StringUtils; /** @@ -96,21 +94,14 @@ public int run(List args) throws IOException { } DistributedFileSystem dfs = getDFS(); - List directives = - new LinkedList(); - 
PathBasedCacheDirective directive = new PathBasedCacheDirective(path, poolName); - directives.add(directive); - List> results = - dfs.addPathBasedCacheDirective(directives); - try { - PathBasedCacheDescriptor entry = results.get(0).get(); - System.out.println("Added PathBasedCache entry " + entry.getEntryId()); - return 0; - } catch (IOException e) { - System.err.println("Error adding cache directive " + directive + ": " + - e.getMessage()); - return 1; - } + PathBasedCacheDirective directive = + new PathBasedCacheDirective(path, poolName); + + PathBasedCacheDescriptor descriptor = + dfs.addPathBasedCacheDirective(directive); + System.out.println("Added PathBasedCache entry " + + descriptor.getEntryId()); + return 0; } } @@ -153,18 +144,10 @@ public int run(List args) throws IOException { return 1; } DistributedFileSystem dfs = getDFS(); - List ids = new LinkedList(); - ids.add(id); - List> results = dfs.removePathBasedCacheDescriptors(ids); - try { - Long resultId = results.get(0).get(); - System.out.println("Removed PathBasedCache entry " + resultId); - return 0; - } catch (IOException e) { - System.err.println("Error removing cache directive " + id + ": " + - e.getMessage()); - return 1; - } + dfs.removePathBasedCacheDescriptor(new PathBasedCacheDescriptor(id, null, + null)); + System.out.println("Removed PathBasedCache directive " + id); + return 0; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index 401aca7638b..59fac7a75d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -368,36 +368,19 @@ message PathBasedCacheDirectiveProto { required string pool = 2; } -message AddPathBasedCacheDirectivesRequestProto { - repeated PathBasedCacheDirectiveProto elements = 1; +message AddPathBasedCacheDirectiveRequestProto { + required PathBasedCacheDirectiveProto directive = 1; } -message AddPathBasedCacheDirectivesResponseProto { - repeated int64 results = 1 [packed=true]; +message AddPathBasedCacheDirectiveResponseProto { + required int64 descriptorId = 1; } -enum AddPathBasedCacheDirectiveErrorProto { - UNEXPECTED_ADD_ERROR = -1; - EMPTY_PATH_ERROR = -2; - INVALID_PATH_NAME_ERROR = -3; - INVALID_POOL_NAME_ERROR = -4; - ADD_PERMISSION_DENIED_ERROR = -5; - PATH_ALREADY_EXISTS_IN_POOL_ERROR = -6; +message RemovePathBasedCacheDescriptorRequestProto { + required int64 descriptorId = 1; } -message RemovePathBasedCacheDescriptorsRequestProto { - repeated int64 elements = 1 [packed=true]; -} - -message RemovePathBasedCacheDescriptorsResponseProto { - repeated int64 results = 1 [packed=true]; -} - -enum RemovePathBasedCacheDescriptorErrorProto { - UNEXPECTED_REMOVE_ERROR = -1; - INVALID_CACHED_PATH_ID_ERROR = -2; - NO_SUCH_CACHED_PATH_ID_ERROR = -3; - REMOVE_PERMISSION_DENIED_ERROR = -4; +message RemovePathBasedCacheDescriptorResponseProto { } message ListPathBasedCacheDescriptorsRequestProto { @@ -644,10 +627,10 @@ service ClientNamenodeProtocol { returns(ListCorruptFileBlocksResponseProto); rpc metaSave(MetaSaveRequestProto) returns(MetaSaveResponseProto); rpc getFileInfo(GetFileInfoRequestProto) returns(GetFileInfoResponseProto); - rpc addPathBasedCacheDirectives(AddPathBasedCacheDirectivesRequestProto) - returns (AddPathBasedCacheDirectivesResponseProto); - rpc removePathBasedCacheDescriptors(RemovePathBasedCacheDescriptorsRequestProto) - returns 
(RemovePathBasedCacheDescriptorsResponseProto); + rpc addPathBasedCacheDirective(AddPathBasedCacheDirectiveRequestProto) + returns (AddPathBasedCacheDirectiveResponseProto); + rpc removePathBasedCacheDescriptor(RemovePathBasedCacheDescriptorRequestProto) + returns (RemovePathBasedCacheDescriptorResponseProto); rpc listPathBasedCacheDescriptors(ListPathBasedCacheDescriptorsRequestProto) returns (ListPathBasedCacheDescriptorsResponseProto); rpc addCachePool(AddCachePoolRequestProto) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java index 45417150229..582de537cf2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java @@ -36,12 +36,11 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.io.nativeio.NativeIO; -import org.apache.hadoop.util.Fallible; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -148,17 +147,13 @@ public void testCachePaths() throws Exception { waitForExpectedNumCachedBlocks(expected); // Cache and check each path in sequence for (int i=0; i toAdd = - new ArrayList(); - toAdd.add(new PathBasedCacheDirective(paths.get(i), pool)); - List> fallibles = - nnRpc.addPathBasedCacheDirectives(toAdd); - assertEquals("Unexpected number of fallibles", - 1, fallibles.size()); - PathBasedCacheDescriptor directive = fallibles.get(0).get(); - assertEquals("Directive does not match requested path", paths.get(i), + PathBasedCacheDirective directive = new PathBasedCacheDirective(paths + .get(i), pool); + PathBasedCacheDescriptor descriptor = + nnRpc.addPathBasedCacheDirective(directive); + assertEquals("Descriptor does not match requested path", paths.get(i), directive.getPath()); - assertEquals("Directive does not match requested pool", pool, + assertEquals("Descriptor does not match requested pool", pool, directive.getPool()); expected += numBlocksPerFile; waitForExpectedNumCachedBlocks(expected); @@ -167,14 +162,8 @@ public void testCachePaths() throws Exception { RemoteIterator entries = nnRpc.listPathBasedCacheDescriptors(0, null, null); for (int i=0; i toRemove = new ArrayList(); - toRemove.add(entry.getEntryId()); - List> fallibles = nnRpc.removePathBasedCacheDescriptors(toRemove); - assertEquals("Unexpected number of fallibles", 1, fallibles.size()); - Long l = fallibles.get(0).get(); - assertEquals("Removed entryId does not match requested", - entry.getEntryId(), l.longValue()); + PathBasedCacheDescriptor descriptor = entries.next(); + nnRpc.removePathBasedCacheDescriptor(descriptor.getEntryId()); expected -= numBlocksPerFile; waitForExpectedNumCachedBlocks(expected); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java index d4eb57885d7..d58343fe24d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java @@ -17,36 +17,40 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.junit.Assert.*; +import static junit.framework.Assert.assertTrue; +import static junit.framework.Assert.fail; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import java.io.IOException; import java.security.PrivilegedExceptionAction; -import java.util.Arrays; -import java.util.List; import junit.framework.Assert; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.EmptyPathError; -import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPathNameError; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PoolWritePermissionDeniedError; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.InvalidIdException; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.InvalidIdException; import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.NoSuchIdException; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.Fallible; +import org.junit.After; +import org.junit.Before; import org.junit.Test; public class TestPathBasedCacheRequests { @@ -55,221 +59,357 @@ public class TestPathBasedCacheRequests { private static final UserGroupInformation unprivilegedUser = UserGroupInformation.createRemoteUser("unprivilegedUser"); - @Test - public void testCreateAndRemovePools() throws Exception { - Configuration conf = new HdfsConfiguration(); - MiniDFSCluster cluster = null; + static private Configuration conf; + static private MiniDFSCluster cluster; + static private DistributedFileSystem dfs; + static private NamenodeProtocols proto; - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); - cluster.waitActive(); - NamenodeProtocols proto = cluster.getNameNodeRpc(); - CachePoolInfo req = new CachePoolInfo("pool1"). - setOwnerName("bob").setGroupName("bobgroup"). 
- setMode(new FsPermission((short)0755)).setWeight(150); - proto.addCachePool(req); - try { - proto.removeCachePool("pool99"); - Assert.fail("expected to get an exception when " + - "removing a non-existent pool."); - } catch (IOException ioe) { - GenericTestUtils.assertExceptionContains("can't remove " + - "nonexistent cache pool", ioe); - } - proto.removeCachePool("pool1"); - try { - proto.removeCachePool("pool1"); - Assert.fail("expected to get an exception when " + - "removing a non-existent pool."); - } catch (IOException ioe) { - GenericTestUtils.assertExceptionContains("can't remove " + - "nonexistent cache pool", ioe); - } - req = new CachePoolInfo("pool2"); - proto.addCachePool(req); - } - - @Test - public void testCreateAndModifyPools() throws Exception { - Configuration conf = new HdfsConfiguration(); - MiniDFSCluster cluster = null; + @Before + public void setup() throws Exception { + conf = new HdfsConfiguration(); // set low limits here for testing purposes conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, 2); conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES, 2); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); - NamenodeProtocols proto = cluster.getNameNodeRpc(); - proto.addCachePool(new CachePoolInfo("pool1"). - setOwnerName("abc").setGroupName("123"). - setMode(new FsPermission((short)0755)).setWeight(150)); - RemoteIterator iter = proto.listCachePools(""); + dfs = cluster.getFileSystem(); + proto = cluster.getNameNodeRpc(); + } + + @After + public void teardown() throws Exception { + if (cluster != null) { + cluster.shutdown(); + } + } + + @Test + public void testBasicPoolOperations() throws Exception { + final String poolName = "pool1"; + CachePoolInfo info = new CachePoolInfo(poolName). + setOwnerName("bob").setGroupName("bobgroup"). 
+ setMode(new FsPermission((short)0755)).setWeight(150); + + // Add a pool + dfs.addCachePool(info); + + // Do some bad addCachePools + try { + dfs.addCachePool(info); + fail("added the pool with the same name twice"); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("pool1 already exists", ioe); + } + try { + dfs.addCachePool(new CachePoolInfo("")); + fail("added empty pool"); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("invalid empty cache pool name", + ioe); + } + try { + dfs.addCachePool(null); + fail("added null pool"); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("CachePoolInfo is null", ioe); + } + try { + proto.addCachePool(new CachePoolInfo("")); + fail("added empty pool"); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("invalid empty cache pool name", + ioe); + } + try { + proto.addCachePool(null); + fail("added null pool"); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("CachePoolInfo is null", ioe); + } + + // Modify the pool + info.setOwnerName("jane").setGroupName("janegroup") + .setMode(new FsPermission((short)0700)).setWeight(314); + dfs.modifyCachePool(info); + + // Do some invalid modify pools + try { + dfs.modifyCachePool(new CachePoolInfo("fool")); + fail("modified non-existent cache pool"); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("fool does not exist", ioe); + } + try { + dfs.modifyCachePool(new CachePoolInfo("")); + fail("modified empty pool"); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("invalid empty cache pool name", + ioe); + } + try { + dfs.modifyCachePool(null); + fail("modified null pool"); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("CachePoolInfo is null", ioe); + } + try { + proto.modifyCachePool(new CachePoolInfo("")); + fail("modified empty pool"); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("invalid empty cache pool name", + ioe); + } + try { + proto.modifyCachePool(null); + fail("modified null pool"); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("CachePoolInfo is null", ioe); + } + + // Remove the pool + dfs.removeCachePool(poolName); + // Do some bad removePools + try { + dfs.removeCachePool("pool99"); + fail("expected to get an exception when " + + "removing a non-existent pool."); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("can't remove " + + "non-existent cache pool", ioe); + } + try { + dfs.removeCachePool(poolName); + Assert.fail("expected to get an exception when " + + "removing a non-existent pool."); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("can't remove " + + "non-existent cache pool", ioe); + } + try { + dfs.removeCachePool(""); + fail("removed empty pool"); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("invalid empty cache pool name", + ioe); + } + try { + dfs.removeCachePool(null); + fail("removed null pool"); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("invalid empty cache pool name", + ioe); + } + try { + proto.removeCachePool(""); + fail("removed empty pool"); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("invalid empty cache pool name", + ioe); + } + try { + proto.removeCachePool(null); + fail("removed null pool"); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("invalid empty cache pool name", 
+ ioe); + } + + info = new CachePoolInfo("pool2"); + dfs.addCachePool(info); + } + + @Test + public void testCreateAndModifyPools() throws Exception { + String poolName = "pool1"; + String ownerName = "abc"; + String groupName = "123"; + FsPermission mode = new FsPermission((short)0755); + int weight = 150; + dfs.addCachePool(new CachePoolInfo(poolName). + setOwnerName(ownerName).setGroupName(groupName). + setMode(mode).setWeight(weight)); + + RemoteIterator iter = dfs.listCachePools(); CachePoolInfo info = iter.next(); - assertEquals("pool1", info.getPoolName()); - assertEquals("abc", info.getOwnerName()); - assertEquals("123", info.getGroupName()); - proto.modifyCachePool(new CachePoolInfo("pool1"). - setOwnerName("def").setGroupName("456")); - iter = proto.listCachePools(""); + assertEquals(poolName, info.getPoolName()); + assertEquals(ownerName, info.getOwnerName()); + assertEquals(groupName, info.getGroupName()); + + ownerName = "def"; + groupName = "456"; + mode = new FsPermission((short)0700); + weight = 151; + dfs.modifyCachePool(new CachePoolInfo(poolName). + setOwnerName(ownerName).setGroupName(groupName). + setMode(mode).setWeight(weight)); + + iter = dfs.listCachePools(); info = iter.next(); - assertEquals("pool1", info.getPoolName()); - assertEquals("def", info.getOwnerName()); - assertEquals("456", info.getGroupName()); - assertEquals(new FsPermission((short)0755), info.getMode()); - assertEquals(Integer.valueOf(150), info.getWeight()); + assertEquals(poolName, info.getPoolName()); + assertEquals(ownerName, info.getOwnerName()); + assertEquals(groupName, info.getGroupName()); + assertEquals(mode, info.getMode()); + assertEquals(Integer.valueOf(weight), info.getWeight()); + + dfs.removeCachePool(poolName); + iter = dfs.listCachePools(); + assertFalse("expected no cache pools after deleting pool", iter.hasNext()); + + proto.listCachePools(null); try { proto.removeCachePool("pool99"); Assert.fail("expected to get an exception when " + "removing a non-existent pool."); } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("can't remove non-existent", + ioe); } - proto.removeCachePool("pool1"); try { - proto.removeCachePool("pool1"); + proto.removeCachePool(poolName); Assert.fail("expected to get an exception when " + "removing a non-existent pool."); } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains("can't remove non-existent", + ioe); } + + iter = dfs.listCachePools(); + assertFalse("expected no cache pools after deleting pool", iter.hasNext()); } private static void validateListAll( RemoteIterator iter, - long id0, long id1, long id2) throws Exception { - Assert.assertEquals(new PathBasedCacheDescriptor(id0, - "/alpha", "pool1"), iter.next()); - Assert.assertEquals(new PathBasedCacheDescriptor(id1, - "/beta", "pool2"), iter.next()); - Assert.assertEquals(new PathBasedCacheDescriptor(id2, - "/gamma", "pool1"), iter.next()); - Assert.assertFalse(iter.hasNext()); + PathBasedCacheDescriptor... 
descriptors) throws Exception { + for (PathBasedCacheDescriptor descriptor: descriptors) { + assertTrue("Unexpectedly few elements", iter.hasNext()); + assertEquals("Unexpected descriptor", descriptor, iter.next()); + } + assertFalse("Unexpectedly many list elements", iter.hasNext()); + } + + private static PathBasedCacheDescriptor addAsUnprivileged( + final PathBasedCacheDirective directive) throws Exception { + return unprivilegedUser + .doAs(new PrivilegedExceptionAction() { + @Override + public PathBasedCacheDescriptor run() throws IOException { + DistributedFileSystem myDfs = + (DistributedFileSystem) FileSystem.get(conf); + return myDfs.addPathBasedCacheDirective(directive); + } + }); } @Test - public void testSetAndGet() throws Exception { - Configuration conf = new HdfsConfiguration(); - MiniDFSCluster cluster = null; + public void testAddRemoveDirectives() throws Exception { + proto.addCachePool(new CachePoolInfo("pool1"). + setMode(new FsPermission((short)0777))); + proto.addCachePool(new CachePoolInfo("pool2"). + setMode(new FsPermission((short)0777))); + proto.addCachePool(new CachePoolInfo("pool3"). + setMode(new FsPermission((short)0777))); + proto.addCachePool(new CachePoolInfo("pool4"). + setMode(new FsPermission((short)0))); + + PathBasedCacheDirective alpha = + new PathBasedCacheDirective("/alpha", "pool1"); + PathBasedCacheDirective beta = + new PathBasedCacheDirective("/beta", "pool2"); + PathBasedCacheDirective delta = + new PathBasedCacheDirective("/delta", "pool1"); + + PathBasedCacheDescriptor alphaD = addAsUnprivileged(alpha); + PathBasedCacheDescriptor alphaD2 = addAsUnprivileged(alpha); + assertEquals("Expected to get the same descriptor when re-adding" + + "an existing PathBasedCacheDirective", alphaD, alphaD2); + PathBasedCacheDescriptor betaD = addAsUnprivileged(beta); try { - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); - cluster.waitActive(); - final NamenodeProtocols proto = cluster.getNameNodeRpc(); - proto.addCachePool(new CachePoolInfo("pool1"). - setMode(new FsPermission((short)0777))); - proto.addCachePool(new CachePoolInfo("pool2"). - setMode(new FsPermission((short)0777))); - proto.addCachePool(new CachePoolInfo("pool3"). - setMode(new FsPermission((short)0777))); - proto.addCachePool(new CachePoolInfo("pool4"). 
- setMode(new FsPermission((short)0))); - - List> addResults1 = - unprivilegedUser.doAs(new PrivilegedExceptionAction< - List>>() { - @Override - public List> run() throws IOException { - return proto.addPathBasedCacheDirectives(Arrays.asList( - new PathBasedCacheDirective[] { - new PathBasedCacheDirective("/alpha", "pool1"), - new PathBasedCacheDirective("/beta", "pool2"), - new PathBasedCacheDirective("", "pool3"), - new PathBasedCacheDirective("/zeta", "nonexistent_pool"), - new PathBasedCacheDirective("/zeta", "pool4"), - new PathBasedCacheDirective("//illegal/path/", "pool1") - })); - } - }); - long ids1[] = new long[2]; - ids1[0] = addResults1.get(0).get().getEntryId(); - ids1[1] = addResults1.get(1).get().getEntryId(); - try { - addResults1.get(2).get(); - Assert.fail("expected an error when adding an empty path"); - } catch (IOException ioe) { - Assert.assertTrue(ioe.getCause() instanceof EmptyPathError); - } - try { - addResults1.get(3).get(); - Assert.fail("expected an error when adding to a nonexistent pool."); - } catch (IOException ioe) { - Assert.assertTrue(ioe.getCause() instanceof InvalidPoolNameError); - } - try { - addResults1.get(4).get(); - Assert.fail("expected an error when adding to a pool with " + - "mode 0 (no permissions for anyone)."); - } catch (IOException ioe) { - Assert.assertTrue(ioe.getCause() - instanceof PoolWritePermissionDeniedError); - } - try { - addResults1.get(5).get(); - Assert.fail("expected an error when adding a malformed path " + - "to the cache directives."); - } catch (IOException ioe) { - //Assert.assertTrue(ioe.getCause() - //instanceof PoolWritePermissionDeniedError); - } - - List> addResults2 = - proto.addPathBasedCacheDirectives(Arrays.asList( - new PathBasedCacheDirective[] { - new PathBasedCacheDirective("/alpha", "pool1"), - new PathBasedCacheDirective("/theta", ""), - new PathBasedCacheDirective("bogus", "pool1"), - new PathBasedCacheDirective("/gamma", "pool1") - })); - long id = addResults2.get(0).get().getEntryId(); - Assert.assertEquals("expected to get back the same ID as last time " + - "when re-adding an existing PathBasedCache directive.", ids1[0], id); - try { - addResults2.get(1).get(); - Assert.fail("expected an error when adding a PathBasedCache " + - "directive with an empty pool name."); - } catch (IOException ioe) { - Assert.assertTrue(ioe.getCause() instanceof InvalidPoolNameError); - } - try { - addResults2.get(2).get(); - Assert.fail("expected an error when adding a PathBasedCache " + - "directive with a non-absolute path name."); - } catch (IOException ioe) { - Assert.assertTrue(ioe.getCause() instanceof InvalidPathNameError); - } - long ids2[] = new long[1]; - ids2[0] = addResults2.get(3).get().getEntryId(); - - RemoteIterator iter = - proto.listPathBasedCacheDescriptors(0, null, null); - validateListAll(iter, ids1[0], ids1[1], ids2[0]); - iter = proto.listPathBasedCacheDescriptors(0, null, null); - validateListAll(iter, ids1[0], ids1[1], ids2[0]); - iter = proto.listPathBasedCacheDescriptors(0, "pool3", null); - Assert.assertFalse(iter.hasNext()); - iter = proto.listPathBasedCacheDescriptors(0, "pool2", null); - Assert.assertEquals(addResults1.get(1).get(), - iter.next()); - Assert.assertFalse(iter.hasNext()); - - List> removeResults1 = - proto.removePathBasedCacheDescriptors(Arrays.asList( - new Long[] { ids1[1], -42L, 999999L })); - Assert.assertEquals(Long.valueOf(ids1[1]), - removeResults1.get(0).get()); - try { - removeResults1.get(1).get(); - Assert.fail("expected an error when removing a negative ID"); - } 
catch (IOException ioe) { - Assert.assertTrue(ioe.getCause() instanceof InvalidIdException); - } - try { - removeResults1.get(2).get(); - Assert.fail("expected an error when removing a nonexistent ID"); - } catch (IOException ioe) { - Assert.assertTrue(ioe.getCause() instanceof NoSuchIdException); - } - iter = proto.listPathBasedCacheDescriptors(0, "pool2", null); - Assert.assertFalse(iter.hasNext()); - } finally { - if (cluster != null) { cluster.shutdown(); } + addAsUnprivileged(new PathBasedCacheDirective("", "pool3")); + fail("expected an error when adding an empty path"); + } catch (IOException ioe) { + assertTrue(ioe instanceof EmptyPathError); } + + try { + addAsUnprivileged(new PathBasedCacheDirective("/unicorn", "no_such_pool")); + fail("expected an error when adding to a non-existent pool."); + } catch (IOException ioe) { + assertTrue(ioe instanceof InvalidPoolNameError); + } + + try { + addAsUnprivileged(new PathBasedCacheDirective("/blackhole", "pool4")); + fail("expected an error when adding to a pool with " + + "mode 0 (no permissions for anyone)."); + } catch (IOException ioe) { + assertTrue(ioe instanceof PoolWritePermissionDeniedError); + } + + try { + addAsUnprivileged(new PathBasedCacheDirective("//illegal/path/", "pool1")); + fail("expected an error when adding a malformed path " + + "to the cache directives."); + } catch (IOException ioe) { + assertTrue(ioe instanceof InvalidPathNameError); + } + + try { + addAsUnprivileged(new PathBasedCacheDirective("/emptypoolname", "")); + Assert.fail("expected an error when adding a PathBasedCache " + + "directive with an empty pool name."); + } catch (IOException ioe) { + Assert.assertTrue(ioe instanceof InvalidPoolNameError); + } + + try { + addAsUnprivileged(new PathBasedCacheDirective("bogus", "pool1")); + Assert.fail("expected an error when adding a PathBasedCache " + + "directive with a non-absolute path name."); + } catch (IOException ioe) { + Assert.assertTrue(ioe instanceof InvalidPathNameError); + } + + PathBasedCacheDescriptor deltaD = addAsUnprivileged(delta); + + RemoteIterator iter; + iter = proto.listPathBasedCacheDescriptors(0, null, null); + validateListAll(iter, alphaD, betaD, deltaD); + iter = proto.listPathBasedCacheDescriptors(0, "pool3", null); + Assert.assertFalse(iter.hasNext()); + iter = proto.listPathBasedCacheDescriptors(0, "pool1", null); + validateListAll(iter, alphaD, deltaD); + iter = proto.listPathBasedCacheDescriptors(0, "pool2", null); + validateListAll(iter, betaD); + + dfs.removePathBasedCacheDescriptor(betaD); + iter = proto.listPathBasedCacheDescriptors(0, "pool2", null); + Assert.assertFalse(iter.hasNext()); + + try { + dfs.removePathBasedCacheDescriptor(betaD); + Assert.fail("expected an error when removing a non-existent ID"); + } catch (IOException ioe) { + Assert.assertTrue(ioe instanceof NoSuchIdException); + } + + try { + proto.removePathBasedCacheDescriptor(-42l); + Assert.fail("expected an error when removing a negative ID"); + } catch (IOException ioe) { + Assert.assertTrue(ioe instanceof InvalidIdException); + } + try { + proto.removePathBasedCacheDescriptor(43l); + Assert.fail("expected an error when removing a non-existent ID"); + } catch (IOException ioe) { + Assert.assertTrue(ioe instanceof NoSuchIdException); + } + + dfs.removePathBasedCacheDescriptor(alphaD); + dfs.removePathBasedCacheDescriptor(deltaD); + iter = proto.listPathBasedCacheDescriptors(0, null, null); + assertFalse(iter.hasNext()); } } From 552e081a071ec46221b7773ec8e60483bdebc539 Mon Sep 17 00:00:00 2001 From: 
Andrew Wang Date: Sat, 21 Sep 2013 00:22:02 +0000 Subject: [PATCH 24/51] Delete a file for HDFS-5236. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1525184 13f79535-47bb-0310-9956-ffa450edef68 --- .../java/org/apache/hadoop/util/Fallible.java | 53 ------------------- 1 file changed, 53 deletions(-) delete mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Fallible.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Fallible.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Fallible.java deleted file mode 100644 index fe343d9eeaf..00000000000 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Fallible.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.util; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Contains either a value of type T, or an IOException. - * - * This can be useful as a return value for batch APIs that need granular - * error reporting. - */ -@InterfaceAudience.LimitedPrivate({"HDFS"}) -@InterfaceStability.Unstable -public class Fallible { - private final T val; - private final IOException ioe; - - public Fallible(T val) { - this.val = val; - this.ioe = null; - } - - public Fallible(IOException ioe) { - this.val = null; - this.ioe = ioe; - } - - public T get() throws IOException { - if (ioe != null) { - throw new IOException(ioe); - } - return this.val; - } -} From 9a361c5821508435b6aabd6640940341902719a1 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Tue, 24 Sep 2013 21:40:53 +0000 Subject: [PATCH 25/51] HDFS-5191. 
Revisit zero-copy API in FSDataInputStream to make it more intuitive (Contributed by Colin Patrick McCabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1526020 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/hadoop/fs/ByteBufferUtil.java | 113 ++++ .../apache/hadoop/fs/FSDataInputStream.java | 53 +- .../org/apache/hadoop/fs/FSInputStream.java | 9 +- .../fs/HasEnhancedByteBufferAccess.java | 79 +++ ...{SupportsZeroCopy.java => ReadOption.java} | 26 +- .../org/apache/hadoop/fs/ZeroCopyCursor.java | 111 ---- .../org/apache/hadoop/io/ByteBufferPool.java | 48 ++ .../hadoop/io/ElasticByteBufferPool.java | 96 ++++ .../apache/hadoop/util/IdentityHashStore.java | 197 +++++++ .../hadoop/util/TestIdentityHashStore.java | 159 ++++++ .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 + .../org/apache/hadoop/hdfs/BlockReader.java | 17 +- .../apache/hadoop/hdfs/BlockReaderLocal.java | 32 +- .../hadoop/hdfs/BlockReaderLocalLegacy.java | 6 +- .../org/apache/hadoop/hdfs/DFSClient.java | 3 +- .../apache/hadoop/hdfs/DFSInputStream.java | 143 +++-- .../hadoop/hdfs/HdfsZeroCopyCursor.java | 148 ----- .../apache/hadoop/hdfs/RemoteBlockReader.java | 6 +- .../hadoop/hdfs/RemoteBlockReader2.java | 6 +- .../hadoop/hdfs/client/ClientMmapManager.java | 8 +- .../src/main/native/libhdfs/expect.c | 16 + .../src/main/native/libhdfs/hdfs.c | 525 ++++++++++------- .../src/main/native/libhdfs/hdfs.h | 134 +++-- .../src/main/native/libhdfs/jni_helper.c | 31 + .../src/main/native/libhdfs/jni_helper.h | 15 + .../libhdfs/test/test_libhdfs_zerocopy.c | 72 +-- .../fs/TestEnhancedByteBufferAccess.java | 530 ++++++++++++++++++ .../hadoop/hdfs/TestBlockReaderLocal.java | 324 ----------- 28 files changed, 1933 insertions(+), 977 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferUtil.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HasEnhancedByteBufferAccess.java rename hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/{SupportsZeroCopy.java => ReadOption.java} (61%) delete mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ZeroCopyCursor.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteBufferPool.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IdentityHashStore.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIdentityHashStore.java delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsZeroCopyCursor.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferUtil.java new file mode 100644 index 00000000000..c31c29b5b6d --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferUtil.java @@ -0,0 +1,113 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.io.ByteBufferPool; + +import com.google.common.base.Preconditions; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class ByteBufferUtil { + + /** + * Determine if a stream can do a byte buffer read via read(ByteBuffer buf) + */ + private static boolean streamHasByteBufferRead(InputStream stream) { + if (!(stream instanceof ByteBufferReadable)) { + return false; + } + if (!(stream instanceof FSDataInputStream)) { + return true; + } + return ((FSDataInputStream)stream).getWrappedStream() + instanceof ByteBufferReadable; + } + + /** + * Perform a fallback read. + */ + public static ByteBuffer fallbackRead( + InputStream stream, ByteBufferPool bufferPool, int maxLength) + throws IOException { + if (bufferPool == null) { + throw new UnsupportedOperationException("zero-copy reads " + + "were not available, and you did not provide a fallback " + + "ByteBufferPool."); + } + boolean useDirect = streamHasByteBufferRead(stream); + ByteBuffer buffer = bufferPool.getBuffer(useDirect, maxLength); + if (buffer == null) { + throw new UnsupportedOperationException("zero-copy reads " + + "were not available, and the ByteBufferPool did not provide " + + "us with " + (useDirect ? "a direct" : "an indirect") + + "buffer."); + } + Preconditions.checkState(buffer.capacity() > 0); + Preconditions.checkState(buffer.isDirect() == useDirect); + maxLength = Math.min(maxLength, buffer.capacity()); + boolean success = false; + try { + if (useDirect) { + buffer.clear(); + buffer.limit(maxLength); + ByteBufferReadable readable = (ByteBufferReadable)stream; + int totalRead = 0; + while (true) { + if (totalRead >= maxLength) { + success = true; + break; + } + int nRead = readable.read(buffer); + if (nRead < 0) { + if (totalRead > 0) { + success = true; + } + break; + } + totalRead += nRead; + } + buffer.flip(); + } else { + buffer.clear(); + int nRead = stream.read(buffer.array(), + buffer.arrayOffset(), maxLength); + if (nRead >= 0) { + buffer.limit(nRead); + success = true; + } + } + } finally { + if (!success) { + // If we got an error while reading, or if we are at EOF, we + // don't need the buffer any more. We can give it back to the + // bufferPool. 
+ bufferPool.putBuffer(buffer); + buffer = null; + } + } + return buffer; + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java index 25a971447f2..a77ca437729 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java @@ -1,4 +1,5 @@ /** + * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,9 +20,13 @@ import java.io.*; import java.nio.ByteBuffer; +import java.util.EnumSet; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.io.ByteBufferPool; +import org.apache.hadoop.fs.ByteBufferUtil; +import org.apache.hadoop.util.IdentityHashStore; /** Utility that wraps a {@link FSInputStream} in a {@link DataInputStream} * and buffers input through a {@link BufferedInputStream}. */ @@ -30,7 +35,15 @@ public class FSDataInputStream extends DataInputStream implements Seekable, PositionedReadable, Closeable, ByteBufferReadable, HasFileDescriptor, CanSetDropBehind, CanSetReadahead, - SupportsZeroCopy { + HasEnhancedByteBufferAccess { + /** + * Map ByteBuffers that we have handed out to readers to ByteBufferPool + * objects + */ + private final IdentityHashStore + extendedReadBuffers + = new IdentityHashStore(0); + public FSDataInputStream(InputStream in) throws IOException { super(in); @@ -169,13 +182,43 @@ public void setDropBehind(Boolean dropBehind) } @Override - public ZeroCopyCursor createZeroCopyCursor() - throws IOException, ZeroCopyUnavailableException { + public ByteBuffer read(ByteBufferPool bufferPool, int maxLength, + EnumSet opts) + throws IOException, UnsupportedOperationException { try { - return ((SupportsZeroCopy)in).createZeroCopyCursor(); + return ((HasEnhancedByteBufferAccess)in).read(bufferPool, + maxLength, opts); } catch (ClassCastException e) { - throw new ZeroCopyUnavailableException(e); + ByteBuffer buffer = ByteBufferUtil. 
+ fallbackRead(this, bufferPool, maxLength); + if (buffer != null) { + extendedReadBuffers.put(buffer, bufferPool); + } + return buffer; + } + } + + private static final EnumSet EMPTY_READ_OPTIONS_SET = + EnumSet.noneOf(ReadOption.class); + + final public ByteBuffer read(ByteBufferPool bufferPool, int maxLength) + throws IOException, UnsupportedOperationException { + return read(bufferPool, maxLength, EMPTY_READ_OPTIONS_SET); + } + + @Override + public void releaseBuffer(ByteBuffer buffer) { + try { + ((HasEnhancedByteBufferAccess)in).releaseBuffer(buffer); + } + catch (ClassCastException e) { + ByteBufferPool bufferPool = extendedReadBuffers.remove( buffer); + if (bufferPool == null) { + throw new IllegalArgumentException("tried to release a buffer " + + "that was not created by this stream."); + } + bufferPool.putBuffer(buffer); } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java index e3308814ce2..148e6745f60 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java @@ -32,7 +32,7 @@ @InterfaceAudience.LimitedPrivate({"HDFS"}) @InterfaceStability.Unstable public abstract class FSInputStream extends InputStream - implements Seekable, PositionedReadable, SupportsZeroCopy { + implements Seekable, PositionedReadable { /** * Seek to the given offset from the start of the file. * The next read() will be from that location. Can't @@ -88,11 +88,4 @@ public void readFully(long position, byte[] buffer) throws IOException { readFully(position, buffer, 0, buffer.length); } - - @Override - public ZeroCopyCursor createZeroCopyCursor() - throws IOException, ZeroCopyUnavailableException { - throw new ZeroCopyUnavailableException("zero copy is not implemented " + - "for this filesystem type."); - } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HasEnhancedByteBufferAccess.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HasEnhancedByteBufferAccess.java new file mode 100644 index 00000000000..31178185020 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HasEnhancedByteBufferAccess.java @@ -0,0 +1,79 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.fs; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.EnumSet; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.io.ByteBufferPool; + +/** + * FSDataInputStreams implement this interface to provide enhanced + * byte buffer access. Usually this takes the form of mmap support. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public interface HasEnhancedByteBufferAccess { + /** + * Get a ByteBuffer containing file data. + * + * This ByteBuffer may come from the stream itself, via a call like mmap, + * or it may come from the ByteBufferFactory which is passed in as an + * argument. + * + * @param factory + * If this is non-null, it will be used to create a fallback + * ByteBuffer when the stream itself cannot create one. + * @param maxLength + * The maximum length of buffer to return. We may return a buffer + * which is shorter than this. + * @param opts + * Options to use when reading. + * + * @return + * We will return null on EOF (and only on EOF). + * Otherwise, we will return a direct ByteBuffer containing at + * least one byte. You must free this ByteBuffer when you are + * done with it by calling releaseBuffer on it. + * The buffer will continue to be readable until it is released + * in this manner. However, the input stream's close method may + * warn about unclosed buffers. + * @throws + * IOException: if there was an error reading. + * UnsupportedOperationException: if factory was null, and we + * needed an external byte buffer. UnsupportedOperationException + * will never be thrown unless the factory argument is null. + */ + public ByteBuffer read(ByteBufferPool factory, int maxLength, + EnumSet opts) + throws IOException, UnsupportedOperationException; + + /** + * Release a ByteBuffer which was created by the enhanced ByteBuffer read + * function. You must not continue using the ByteBuffer after calling this + * function. + * + * @param buffer + * The ByteBuffer to release. + */ + public void releaseBuffer(ByteBuffer buffer); +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/SupportsZeroCopy.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ReadOption.java similarity index 61% rename from hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/SupportsZeroCopy.java rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ReadOption.java index 2a4d51da07a..fa7aca1c497 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/SupportsZeroCopy.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ReadOption.java @@ -15,30 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.fs; -import java.io.IOException; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** - * Supports zero-copy reads. + * Options that can be used when reading from a FileSystem. */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public interface SupportsZeroCopy { +@InterfaceAudience.Public +@InterfaceStability.Stable +public enum ReadOption { /** - * Get a zero-copy cursor to use for zero-copy reads. 
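For reference, a minimal caller-side sketch of the enhanced byte buffer read contract described in the HasEnhancedByteBufferAccess javadoc above. It assumes a FileSystem and Path are already available; the class name EnhancedReadExample, the method name countBytes, and the 1 MB request size are illustrative only.

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.util.EnumSet;

    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.ReadOption;
    import org.apache.hadoop.io.ElasticByteBufferPool;

    public class EnhancedReadExample {
      /** Count the bytes in a file using read(ByteBufferPool, int, EnumSet). */
      public static long countBytes(FileSystem fs, Path path) throws IOException {
        ElasticByteBufferPool pool = new ElasticByteBufferPool();
        long total = 0;
        FSDataInputStream in = fs.open(path);
        try {
          while (true) {
            // read() returns null only at EOF; otherwise the buffer holds
            // at least one byte, whether it came from an mmap or the pool.
            ByteBuffer buf = in.read(pool, 1024 * 1024,
                EnumSet.of(ReadOption.SKIP_CHECKSUMS));
            if (buf == null) {
              break;
            }
            try {
              total += buf.remaining();
            } finally {
              // Every buffer handed out by read() must be released on the
              // same stream when the caller is done with it.
              in.releaseBuffer(buf);
            }
          }
        } finally {
          in.close();
        }
        return total;
      }
    }

Note that in this patch the zero-copy mmap path is only attempted when ReadOption.SKIP_CHECKSUMS is present; otherwise DFSInputStream falls back to reading through the supplied ByteBufferPool.
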
- * - * @throws IOException - * If there was an error creating the ZeroCopyCursor - * @throws UnsupportedOperationException - * If this stream does not support zero-copy reads. - * This is used, for example, when one stream wraps another - * which may or may not support ZCR. + * Skip checksums when reading. This option may be useful when reading a file + * format that has built-in checksums, or for testing purposes. */ - public ZeroCopyCursor createZeroCopyCursor() - throws IOException, ZeroCopyUnavailableException; -} \ No newline at end of file + SKIP_CHECKSUMS, +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ZeroCopyCursor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ZeroCopyCursor.java deleted file mode 100644 index 5181b49da2d..00000000000 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ZeroCopyCursor.java +++ /dev/null @@ -1,111 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs; - -import java.io.Closeable; -import java.io.EOFException; -import java.io.IOException; -import java.nio.ByteBuffer; - -/** - * A ZeroCopyCursor allows you to make zero-copy reads. - * - * Cursors should be closed when they are no longer needed. - * - * Example: - * FSDataInputStream fis = fs.open("/file"); - * ZeroCopyCursor cursor = fis.createZeroCopyCursor(); - * try { - * cursor.read(128); - * ByteBuffer data = cursor.getData(); - * processData(data); - * } finally { - * cursor.close(); - * } - */ -public interface ZeroCopyCursor extends Closeable { - /** - * Set the fallback buffer used for this zero copy cursor. - * The fallback buffer is used when a true zero-copy read is impossible. - * If there is no fallback buffer, UnsupportedOperationException is thrown - * when a true zero-copy read cannot be done. - * - * @param fallbackBuffer The fallback buffer to set, or null for none. - */ - public void setFallbackBuffer(ByteBuffer fallbackBuffer); - - /** - * @return the fallback buffer in use, or null if there is none. - */ - public ByteBuffer getFallbackBuffer(); - - /** - * @param skipChecksums Whether we should skip checksumming with this - * zero copy cursor. - */ - public void setSkipChecksums(boolean skipChecksums); - - /** - * @return Whether we should skip checksumming with this - * zero copy cursor. - */ - public boolean getSkipChecksums(); - - /** - * @param allowShortReads Whether we should allow short reads. - */ - public void setAllowShortReads(boolean allowShortReads); - - /** - * @return Whether we should allow short reads. - */ - public boolean getAllowShortReads(); - - /** - * Perform a zero-copy read. - * - * @param toRead The minimum number of bytes to read. - * Must not be negative. 
If we hit EOF before - * reading this many bytes, we will either throw - * EOFException (if allowShortReads = false), or - * return a short read (if allowShortReads = true). - * A short read could be as short as 0 bytes. - * @throws UnsupportedOperationException - * If a true zero-copy read cannot be done, and no fallback - * buffer was set. - * @throws EOFException - * If allowShortReads = false, and we can't read all the bytes - * that were requested. This will never be thrown if - * allowShortReads = true. - * @throws IOException - * If there was an error while reading the data. - */ - public void read(int toRead) - throws UnsupportedOperationException, EOFException, IOException; - - /** - * Get the current data buffer. - * - * This buffer will remain valid until either this cursor is closed, or we - * call read() again on this same cursor. You can find the amount of data - * that was read previously by calling ByteBuffer#remaining. - * - * @return The current data buffer. - */ - public ByteBuffer getData(); -} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteBufferPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteBufferPool.java new file mode 100644 index 00000000000..bb2c978e092 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteBufferPool.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.io; + +import java.nio.ByteBuffer; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +@InterfaceAudience.Public +@InterfaceStability.Stable +public interface ByteBufferPool { + /** + * Get a new direct ByteBuffer. The pool can provide this from + * removing a buffer from its internal cache, or by allocating a + * new buffer. + * + * @param direct Whether the buffer should be direct. + * @param minLength The minimum length the buffer will have. + * @return A new ByteBuffer. This ByteBuffer must be direct. + * Its capacity can be less than what was requested, but + * must be at least 1 byte. + */ + ByteBuffer getBuffer(boolean direct, int length); + + /** + * Release a buffer back to the pool. + * The pool may choose to put this buffer into its cache. 
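The getBuffer/putBuffer contract can be satisfied by something as small as the following non-caching pool; the class name SimpleByteBufferPool is hypothetical and is used here only to illustrate the interface.

    import java.nio.ByteBuffer;

    import org.apache.hadoop.io.ByteBufferPool;

    public class SimpleByteBufferPool implements ByteBufferPool {
      @Override
      public ByteBuffer getBuffer(boolean direct, int length) {
        // Honor the caller's request for a direct or heap buffer; no caching.
        return direct ? ByteBuffer.allocateDirect(length)
                      : ByteBuffer.allocate(length);
      }

      @Override
      public void putBuffer(ByteBuffer buffer) {
        // Dropping returned buffers is legal; this pool simply lets the
        // garbage collector reclaim them.
      }
    }
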
+ * + * @param buffer a direct bytebuffer + */ + void putBuffer(ByteBuffer buffer); +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java new file mode 100644 index 00000000000..f1ea9ff6b5b --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java @@ -0,0 +1,96 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.io; + +import com.google.common.collect.ComparisonChain; + +import java.nio.ByteBuffer; +import java.util.Map; +import java.util.TreeMap; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * This is a simple ByteBufferPool which just creates ByteBuffers as needed. + * It also caches ByteBuffers after they're released. It will always return + * the smallest cached buffer with at least the capacity you request. + * We don't try to do anything clever here like try to limit the maximum cache + * size. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public final class ElasticByteBufferPool implements ByteBufferPool { + private static final class Key implements Comparable { + private final int capacity; + private final long insertionTime; + + Key(int capacity, long insertionTime) { + this.capacity = capacity; + this.insertionTime = insertionTime; + } + + @Override + public int compareTo(Key other) { + return ComparisonChain.start(). + compare(capacity, other.capacity). + compare(insertionTime, other.insertionTime). + result(); + } + } + + private final TreeMap buffers = + new TreeMap(); + + private final TreeMap directBuffers = + new TreeMap(); + + private final TreeMap getBufferTree(boolean direct) { + return direct ? directBuffers : buffers; + } + + @Override + public synchronized ByteBuffer getBuffer(boolean direct, int length) { + TreeMap tree = getBufferTree(direct); + Map.Entry entry = + tree.ceilingEntry(new Key(length, 0)); + if (entry == null) { + return direct ? ByteBuffer.allocateDirect(length) : + ByteBuffer.allocate(length); + } + tree.remove(entry.getKey()); + return entry.getValue(); + } + + @Override + public synchronized void putBuffer(ByteBuffer buffer) { + TreeMap tree = getBufferTree(buffer.isDirect()); + while (true) { + Key key = new Key(buffer.capacity(), System.nanoTime()); + if (!tree.containsKey(key)) { + tree.put(key, buffer); + return; + } + // Buffers are indexed by (capacity, time). + // If our key is not unique on the first try, we try again, since the + // time will be different. 
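A short usage sketch of ElasticByteBufferPool's smallest-fit behaviour; the class name ElasticPoolDemo and the buffer sizes are illustrative only.

    import java.nio.ByteBuffer;

    import org.apache.hadoop.io.ElasticByteBufferPool;

    public class ElasticPoolDemo {
      public static void main(String[] args) {
        ElasticByteBufferPool pool = new ElasticByteBufferPool();
        // Nothing is cached yet, so this allocates a fresh 1024-byte direct buffer.
        ByteBuffer first = pool.getBuffer(true, 1024);
        // Returning it caches it under the key (capacity = 1024, nanoTime).
        pool.putBuffer(first);
        // A later request for 512 bytes is served from the cache: the pool
        // hands back the smallest cached buffer with capacity >= 512.
        ByteBuffer second = pool.getBuffer(true, 512);
        System.out.println(second.capacity()); // prints 1024
      }
    }
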
Since we use nanoseconds, it's pretty + // unlikely that we'll loop even once, unless the system clock has a + // poor granularity. + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IdentityHashStore.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IdentityHashStore.java new file mode 100644 index 00000000000..4209488d39a --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IdentityHashStore.java @@ -0,0 +1,197 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.util; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import com.google.common.base.Preconditions; + +/** + * The IdentityHashStore stores (key, value) mappings in an array. + * It is similar to java.util.HashTable, but much more lightweight. + * Neither inserting nor removing an element ever leads to any garbage + * getting created (assuming the array doesn't need to be enlarged). + * + * Unlike HashTable, it compares keys using + * {@link System#identityHashCode(Object)} and the identity operator. + * This is useful for types like ByteBuffer which have expensive hashCode + * and equals operators. + * + * We use linear probing to resolve collisions. This avoids the need for + * the overhead of linked list data structures. It also means that it is + * expensive to attempt to remove an element that isn't there, since we + * have to look at the entire array to be sure that it doesn't exist. + * + * @param The key type to use. + * @param THe value type to use. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +@SuppressWarnings("unchecked") +public final class IdentityHashStore { + /** + * Even elements are keys; odd elements are values. + * The array has size 1 + Math.pow(2, capacity). + */ + private Object buffer[]; + + private int numInserted = 0; + + private int capacity; + + /** + * The default maxCapacity value to use. + */ + private static final int DEFAULT_MAX_CAPACITY = 2; + + public IdentityHashStore(int capacity) { + Preconditions.checkArgument(capacity >= 0); + if (capacity == 0) { + this.capacity = 0; + this.buffer = null; + } else { + // Round the capacity we need up to a power of 2. + realloc((int)Math.pow(2, + Math.ceil(Math.log(capacity) / Math.log(2)))); + } + } + + private void realloc(int newCapacity) { + Preconditions.checkArgument(newCapacity > 0); + Object prevBuffer[] = buffer; + this.capacity = newCapacity; + // Each element takes two array slots -- one for the key, + // and another for the value. We also want a load factor + // of 0.50. Combine those together and you get 4 * newCapacity. 
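A brief sketch of the identity semantics described in the IdentityHashStore class comment; the class name IdentityStoreDemo and the key strings are illustrative only.

    import org.apache.hadoop.util.IdentityHashStore;

    public class IdentityStoreDemo {
      public static void main(String[] args) {
        IdentityHashStore<String, Integer> store =
            new IdentityHashStore<String, Integer>(4);
        String key = new String("key");
        String equalButDistinct = new String("key");
        store.put(key, 1);
        // Lookups use reference identity, not equals(): an equal but
        // distinct key object finds nothing.
        System.out.println(store.get(equalButDistinct)); // null
        System.out.println(store.get(key));              // 1
        // Inserting the same key again never overwrites; it adds an entry.
        store.put(key, 2);
        System.out.println(store.numElements());         // 2
      }
    }
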
+ this.buffer = new Object[4 * newCapacity]; + this.numInserted = 0; + if (prevBuffer != null) { + for (int i = 0; i < prevBuffer.length; i += 2) { + if (prevBuffer[i] != null) { + putInternal(prevBuffer[i], prevBuffer[i + 1]); + } + } + } + } + + private void putInternal(Object k, Object v) { + int hash = System.identityHashCode(k); + final int numEntries = buffer.length / 2; + int index = hash % numEntries; + while (true) { + if (buffer[2 * index] == null) { + buffer[2 * index] = k; + buffer[1 + (2 * index)] = v; + numInserted++; + return; + } + index = (index + 1) % numEntries; + } + } + + /** + * Add a new (key, value) mapping. + * + * Inserting a new (key, value) never overwrites a previous one. + * In other words, you can insert the same key multiple times and it will + * lead to multiple entries. + */ + public void put(K k, V v) { + Preconditions.checkNotNull(k); + if (buffer == null) { + realloc(DEFAULT_MAX_CAPACITY); + } else if (numInserted + 1 > capacity) { + realloc(capacity * 2); + } + putInternal(k, v); + } + + private int getElementIndex(K k) { + if (buffer == null) { + return -1; + } + final int numEntries = buffer.length / 2; + int hash = System.identityHashCode(k); + int index = hash % numEntries; + int firstIndex = index; + do { + if (buffer[2 * index] == k) { + return index; + } + index = (index + 1) % numEntries; + } while (index != firstIndex); + return -1; + } + + /** + * Retrieve a value associated with a given key. + */ + public V get(K k) { + int index = getElementIndex(k); + if (index < 0) { + return null; + } + return (V)buffer[1 + (2 * index)]; + } + + /** + * Retrieve a value associated with a given key, and delete the + * relevant entry. + */ + public V remove(K k) { + int index = getElementIndex(k); + if (index < 0) { + return null; + } + V val = (V)buffer[1 + (2 * index)]; + buffer[2 * index] = null; + buffer[1 + (2 * index)] = null; + numInserted--; + return val; + } + + public boolean isEmpty() { + return numInserted == 0; + } + + public int numElements() { + return numInserted; + } + + public int capacity() { + return capacity; + } + + public interface Visitor { + void accept(K k, V v); + } + + /** + * Visit all key, value pairs in the IdentityHashStore. + */ + public void visitAll(Visitor visitor) { + int length = buffer == null ? 0 : buffer.length; + for (int i = 0; i < length; i += 2) { + if (buffer[i] != null) { + visitor.accept((K)buffer[i], (V)buffer[i + 1]); + } + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIdentityHashStore.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIdentityHashStore.java new file mode 100644 index 00000000000..795c8e32ccf --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIdentityHashStore.java @@ -0,0 +1,159 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.util; + +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; + +import junit.framework.Assert; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.util.IdentityHashStore; +import org.apache.hadoop.util.IdentityHashStore.Visitor; +import org.junit.Test; + +public class TestIdentityHashStore { + private static final Log LOG = LogFactory.getLog(TestIdentityHashStore.class.getName()); + + private static class Key { + private final String name; + + Key(String name) { + this.name = name; + } + + @Override + public int hashCode() { + throw new RuntimeException("should not be used!"); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof Key)) { + return false; + } + Key other = (Key)o; + return name.equals(other.name); + } + } + + @Test(timeout=60000) + public void testStartingWithZeroCapacity() { + IdentityHashStore store = + new IdentityHashStore(0); + store.visitAll(new Visitor() { + @Override + public void accept(Key k, Integer v) { + Assert.fail("found key " + k + " in empty IdentityHashStore."); + } + }); + Assert.assertTrue(store.isEmpty()); + final Key key1 = new Key("key1"); + Integer value1 = new Integer(100); + store.put(key1, value1); + Assert.assertTrue(!store.isEmpty()); + Assert.assertEquals(value1, store.get(key1)); + store.visitAll(new Visitor() { + @Override + public void accept(Key k, Integer v) { + Assert.assertEquals(key1, k); + } + }); + Assert.assertEquals(value1, store.remove(key1)); + Assert.assertTrue(store.isEmpty()); + } + + @Test(timeout=60000) + public void testDuplicateInserts() { + IdentityHashStore store = + new IdentityHashStore(4); + store.visitAll(new Visitor() { + @Override + public void accept(Key k, Integer v) { + Assert.fail("found key " + k + " in empty IdentityHashStore."); + } + }); + Assert.assertTrue(store.isEmpty()); + Key key1 = new Key("key1"); + Integer value1 = new Integer(100); + Integer value2 = new Integer(200); + Integer value3 = new Integer(300); + store.put(key1, value1); + Key equalToKey1 = new Key("key1"); + + // IdentityHashStore compares by object equality, not equals() + Assert.assertNull(store.get(equalToKey1)); + + Assert.assertTrue(!store.isEmpty()); + Assert.assertEquals(value1, store.get(key1)); + store.put(key1, value2); + store.put(key1, value3); + final List allValues = new LinkedList(); + store.visitAll(new Visitor() { + @Override + public void accept(Key k, Integer v) { + allValues.add(v); + } + }); + Assert.assertEquals(3, allValues.size()); + for (int i = 0; i < 3; i++) { + Integer value = store.remove(key1); + Assert.assertTrue(allValues.remove(value)); + } + Assert.assertNull(store.remove(key1)); + Assert.assertTrue(store.isEmpty()); + } + + @Test(timeout=60000) + public void testAdditionsAndRemovals() { + IdentityHashStore store = + new IdentityHashStore(0); + final int NUM_KEYS = 1000; + LOG.debug("generating " + NUM_KEYS + " keys"); + final List keys = new ArrayList(NUM_KEYS); + for (int i = 0; i < NUM_KEYS; i++) { + keys.add(new Key("key " + i)); + } + for (int i 
= 0; i < NUM_KEYS; i++) { + store.put(keys.get(i), i); + } + store.visitAll(new Visitor() { + @Override + public void accept(Key k, Integer v) { + Assert.assertTrue(keys.contains(k)); + } + }); + for (int i = 0; i < NUM_KEYS; i++) { + Assert.assertEquals(Integer.valueOf(i), + store.remove(keys.get(i))); + } + store.visitAll(new Visitor() { + @Override + public void accept(Key k, Integer v) { + Assert.fail("expected all entries to be removed"); + } + }); + Assert.assertTrue("expected the store to be " + + "empty, but found " + store.numElements() + " elements.", + store.isEmpty()); + Assert.assertEquals(1024, store.capacity()); + } + +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index 9e936943013..51af0b7aa3f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -45,6 +45,9 @@ HDFS-4949 (Unreleased) HDFS-5236. Change PathBasedCacheDirective APIs to be a single value rather than batch. (Contributed by Andrew Wang) + HDFS-5191. Revisit zero-copy API in FSDataInputStream to make it more + intuitive. (Contributed by Colin Patrick McCabe) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java index 456a79f7d89..2f0686a9beb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java @@ -20,6 +20,7 @@ import java.io.IOException; import org.apache.hadoop.fs.ByteBufferReadable; +import org.apache.hadoop.hdfs.client.ClientMmap; import org.apache.hadoop.hdfs.client.ClientMmapManager; import org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -28,6 +29,7 @@ * from a single datanode. */ public interface BlockReader extends ByteBufferReadable { + /* same interface as inputStream java.io.InputStream#read() * used by DFSInputStream#read() @@ -85,19 +87,12 @@ public interface BlockReader extends ByteBufferReadable { boolean isShortCircuit(); /** - * Do a zero-copy read with the current block reader. + * Get a ClientMmap object for this BlockReader. * - * We assume that the calling code has done bounds checking, and won't ask - * us for more bytes than are supposed to be visible (or are in the file). - * - * @param buffers The zero-copy buffers object. * @param curBlock The current block. - * @param blockPos Position in the current block to start reading at. - * @param toRead The number of bytes to read from the block. - * - * @return true if the read was done, false otherwise. + * @return The ClientMmap object, or null if mmap is not + * supported. 
*/ - boolean readZeroCopy(HdfsZeroCopyCursor buffers, - LocatedBlock curBlock, long blockPos, int toRead, + ClientMmap getClientMmap(LocatedBlock curBlock, ClientMmapManager mmapManager); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java index 3e430a150c0..aeac1757976 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java @@ -548,46 +548,28 @@ public boolean isShortCircuit() { } @Override - public boolean readZeroCopy(HdfsZeroCopyCursor cursor, - LocatedBlock curBlock, long blockPos, int toRead, - ClientMmapManager mmapManager) { + public ClientMmap getClientMmap(LocatedBlock curBlock, + ClientMmapManager mmapManager) { if (clientMmap == null) { if (mmapDisabled) { - return false; + return null; } try { clientMmap = mmapManager.fetch(datanodeID, block, dataIn); if (clientMmap == null) { mmapDisabled = true; - return false; + return null; } } catch (InterruptedException e) { LOG.error("Interrupted while setting up mmap for " + filename, e); Thread.currentThread().interrupt(); - return false; + return null; } catch (IOException e) { LOG.error("unable to set up mmap for " + filename, e); mmapDisabled = true; - return false; + return null; } } - long limit = blockPos + toRead; - if (limit > Integer.MAX_VALUE) { - /* - * In Java, ByteBuffers use a 32-bit length, capacity, offset, etc. - * This limits our mmap'ed regions to 2 GB in length. - * TODO: we can implement zero-copy for larger blocks by doing multiple - * mmaps. - */ - mmapDisabled = true; - clientMmap.unref(); - clientMmap = null; - return false; - } - ByteBuffer mmapBuffer = clientMmap.getMappedByteBuffer().duplicate(); - mmapBuffer.position((int)blockPos); - mmapBuffer.limit((int)limit); - cursor.setMmap(clientMmap, mmapBuffer); - return true; + return clientMmap; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java index a4a4f680bfa..85ee41b6305 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java @@ -28,6 +28,7 @@ import java.util.LinkedHashMap; import java.util.Map; +import org.apache.hadoop.hdfs.client.ClientMmap; import org.apache.hadoop.hdfs.client.ClientMmapManager; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -705,9 +706,8 @@ public boolean isShortCircuit() { } @Override - public boolean readZeroCopy(HdfsZeroCopyCursor buffers, - LocatedBlock curBlock, long blockPos, int toRead, + public ClientMmap getClientMmap(LocatedBlock curBlock, ClientMmapManager mmapManager) { - return false; + return null; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index f032bd1bddf..8ea0939f9de 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -2610,7 +2610,8 @@ public CachingStrategy getDefaultWriteCachingStrategy() { return 
defaultWriteCachingStrategy; } - ClientMmapManager getMmapManager() { + @VisibleForTesting + public ClientMmapManager getMmapManager() { return mmapManager; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java index 06b3b68b2d5..74fcc6f14ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java @@ -24,6 +24,7 @@ import java.nio.ByteBuffer; import java.util.AbstractMap; import java.util.ArrayList; +import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -36,12 +37,15 @@ import org.apache.commons.io.IOUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.ByteBufferReadable; +import org.apache.hadoop.fs.ByteBufferUtil; import org.apache.hadoop.fs.CanSetDropBehind; import org.apache.hadoop.fs.CanSetReadahead; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.FSInputStream; +import org.apache.hadoop.fs.HasEnhancedByteBufferAccess; +import org.apache.hadoop.fs.ReadOption; import org.apache.hadoop.fs.UnresolvedLinkException; -import org.apache.hadoop.fs.ZeroCopyCursor; +import org.apache.hadoop.hdfs.client.ClientMmap; import org.apache.hadoop.hdfs.net.DomainPeer; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.net.TcpPeerServer; @@ -55,12 +59,14 @@ import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException; +import org.apache.hadoop.io.ByteBufferPool; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.unix.DomainSocket; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.IdentityHashStore; import com.google.common.annotations.VisibleForTesting; @@ -70,7 +76,8 @@ ****************************************************************/ @InterfaceAudience.Private public class DFSInputStream extends FSInputStream -implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead { +implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead, + HasEnhancedByteBufferAccess { @VisibleForTesting static boolean tcpReadsDisabledForTesting = false; private final PeerCache peerCache; @@ -88,6 +95,15 @@ public class DFSInputStream extends FSInputStream private CachingStrategy cachingStrategy; private final ReadStatistics readStatistics = new ReadStatistics(); + /** + * Track the ByteBuffers that we have handed out to readers. + * + * The value type can be either ByteBufferPool or ClientMmap, depending on + * whether we this is a memory-mapped buffer or not. 
+ */ + private final IdentityHashStore + extendedReadBuffers = new IdentityHashStore(0); + public static class ReadStatistics { public ReadStatistics() { this.totalBytesRead = 0; @@ -606,6 +622,20 @@ public synchronized void close() throws IOException { } dfsClient.checkOpen(); + if (!extendedReadBuffers.isEmpty()) { + final StringBuilder builder = new StringBuilder(); + extendedReadBuffers.visitAll(new IdentityHashStore.Visitor() { + private String prefix = ""; + @Override + public void accept(ByteBuffer k, Object v) { + builder.append(prefix).append(k); + prefix = ", "; + } + }); + DFSClient.LOG.warn("closing file " + src + ", but there are still " + + "unreleased ByteBuffers allocated by read(). " + + "Please release " + builder.toString() + "."); + } if (blockReader != null) { blockReader.close(); blockReader = null; @@ -1413,9 +1443,11 @@ public synchronized void setDropBehind(Boolean dropBehind) closeCurrentBlockReader(); } - synchronized void readZeroCopy(HdfsZeroCopyCursor zcursor, int toRead) - throws IOException { - assert(toRead > 0); + @Override + public synchronized ByteBuffer read(ByteBufferPool bufferPool, + int maxLength, EnumSet opts) + throws IOException, UnsupportedOperationException { + assert(maxLength > 0); if (((blockReader == null) || (blockEnd == -1)) && (pos < getFileLength())) { /* @@ -1429,50 +1461,81 @@ synchronized void readZeroCopy(HdfsZeroCopyCursor zcursor, int toRead) "at position " + pos); } } + boolean canSkipChecksums = opts.contains(ReadOption.SKIP_CHECKSUMS); + if (canSkipChecksums) { + ByteBuffer buffer = tryReadZeroCopy(maxLength); + if (buffer != null) { + return buffer; + } + } + ByteBuffer buffer = ByteBufferUtil. + fallbackRead(this, bufferPool, maxLength); + if (buffer != null) { + extendedReadBuffers.put(buffer, bufferPool); + } + return buffer; + } + + private synchronized ByteBuffer tryReadZeroCopy(int maxLength) + throws IOException { + // Java ByteBuffers can't be longer than 2 GB, because they use + // 4-byte signed integers to represent capacity, etc. + // So we can't mmap the parts of the block higher than the 2 GB offset. + // FIXME: we could work around this with multiple memory maps. + // See HDFS-5101. + long blockEnd32 = Math.min(Integer.MAX_VALUE, blockEnd); long curPos = pos; - boolean canSkipChecksums = zcursor.getSkipChecksums(); - long blockLeft = blockEnd - curPos + 1; - if (zcursor.getAllowShortReads()) { - if (blockLeft < toRead) { - toRead = (int)blockLeft; + long blockLeft = blockEnd32 - curPos + 1; + if (blockLeft <= 0) { + if (DFSClient.LOG.isDebugEnabled()) { + DFSClient.LOG.debug("unable to perform a zero-copy read from offset " + + curPos + " of " + src + "; blockLeft = " + blockLeft + + "; blockEnd32 = " + blockEnd32 + ", blockEnd = " + blockEnd + + "; maxLength = " + maxLength); } + return null; } - if (canSkipChecksums && (toRead <= blockLeft)) { - long blockStartInFile = currentLocatedBlock.getStartOffset(); - long blockPos = curPos - blockStartInFile; - if (blockReader.readZeroCopy(zcursor, - currentLocatedBlock, blockPos, toRead, - dfsClient.getMmapManager())) { - if (DFSClient.LOG.isDebugEnabled()) { - DFSClient.LOG.debug("readZeroCopy read " + toRead + " bytes from " + - "offset " + curPos + " via the zero-copy read path. 
" + - "blockEnd = " + blockEnd); - } - readStatistics.addZeroCopyBytes(toRead); - seek(pos + toRead); - return; + int length = Math.min((int)blockLeft, maxLength); + long blockStartInFile = currentLocatedBlock.getStartOffset(); + long blockPos = curPos - blockStartInFile; + long limit = blockPos + length; + ClientMmap clientMmap = + blockReader.getClientMmap(currentLocatedBlock, + dfsClient.getMmapManager()); + if (clientMmap == null) { + if (DFSClient.LOG.isDebugEnabled()) { + DFSClient.LOG.debug("unable to perform a zero-copy read from offset " + + curPos + " of " + src + "; BlockReader#getClientMmap returned " + + "null."); } + return null; } - /* - * Slow path reads. - * - * readStatistics will be updated when we call back into this - * stream's read methods. - */ - long prevBlockEnd = blockEnd; - int slowReadAmount = zcursor.readViaSlowPath(toRead); + seek(pos + length); + ByteBuffer buffer = clientMmap.getMappedByteBuffer().asReadOnlyBuffer(); + buffer.position((int)blockPos); + buffer.limit((int)limit); + clientMmap.ref(); + extendedReadBuffers.put(buffer, clientMmap); + readStatistics.addZeroCopyBytes(length); if (DFSClient.LOG.isDebugEnabled()) { - DFSClient.LOG.debug("readZeroCopy read " + slowReadAmount + " bytes " + - "from offset " + curPos + " via the fallback read path. " + - "prevBlockEnd = " + prevBlockEnd + ", blockEnd = " + blockEnd + - ", canSkipChecksums = " + canSkipChecksums); + DFSClient.LOG.debug("readZeroCopy read " + maxLength + " bytes from " + + "offset " + curPos + " via the zero-copy read path. " + + "blockEnd = " + blockEnd); } + return buffer; } @Override - public ZeroCopyCursor createZeroCopyCursor() - throws IOException, UnsupportedOperationException { - return new HdfsZeroCopyCursor(this, - dfsClient.getConf().skipShortCircuitChecksums); + public synchronized void releaseBuffer(ByteBuffer buffer) { + Object val = extendedReadBuffers.remove(buffer); + if (val == null) { + throw new IllegalArgumentException("tried to release a buffer " + + "that was not created by this stream, " + buffer); + } + if (val instanceof ClientMmap) { + ((ClientMmap)val).unref(); + } else if (val instanceof ByteBufferPool) { + ((ByteBufferPool)val).putBuffer(buffer); + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsZeroCopyCursor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsZeroCopyCursor.java deleted file mode 100644 index 42b3eb7bcf1..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsZeroCopyCursor.java +++ /dev/null @@ -1,148 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdfs; - -import java.io.EOFException; -import java.io.IOException; -import java.nio.ByteBuffer; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.ZeroCopyCursor; -import org.apache.hadoop.hdfs.client.ClientMmap; - -public class HdfsZeroCopyCursor implements ZeroCopyCursor { - public static final Log LOG = LogFactory.getLog(HdfsZeroCopyCursor.class); - private DFSInputStream stream; - private boolean skipChecksums; - private boolean allowShortReads; - private ClientMmap mmap; - private ByteBuffer fallbackBuffer; - private ByteBuffer readBuffer; - - HdfsZeroCopyCursor(DFSInputStream stream, boolean skipChecksums) { - this.stream = stream; - this.skipChecksums = skipChecksums; - this.allowShortReads = false; - this.mmap = null; - this.fallbackBuffer = null; - this.readBuffer = null; - } - - @Override - public void close() throws IOException { - stream = null; - if (mmap != null) { - mmap.unref(); - mmap = null; - } - fallbackBuffer = null; - readBuffer = null; - } - - @Override - public void setFallbackBuffer(ByteBuffer fallbackBuffer) { - this.fallbackBuffer = fallbackBuffer; - } - - @Override - public ByteBuffer getFallbackBuffer() { - return this.fallbackBuffer; - } - - @Override - public void setSkipChecksums(boolean skipChecksums) { - this.skipChecksums = skipChecksums; - } - - @Override - public boolean getSkipChecksums() { - return this.skipChecksums; - } - - @Override - public void setAllowShortReads(boolean allowShortReads) { - this.allowShortReads = allowShortReads; - } - - @Override - public boolean getAllowShortReads() { - return this.allowShortReads; - } - - @Override - public void read(int toRead) throws UnsupportedOperationException, - EOFException, IOException { - if (toRead < 0) { - throw new IllegalArgumentException("can't read " + toRead + " bytes."); - } - stream.readZeroCopy(this, toRead); - } - - @Override - public ByteBuffer getData() { - return readBuffer; - } - - int readViaSlowPath(int toRead) throws EOFException, IOException { - if (fallbackBuffer == null) { - throw new UnsupportedOperationException("unable to read via " + - "the fastpath, and there was no fallback buffer provided."); - } - fallbackBuffer.clear(); - fallbackBuffer.limit(toRead); // will throw if toRead is too large - - int totalRead = 0; - readBuffer = fallbackBuffer; - try { - while (toRead > 0) { - int nread = stream.read(fallbackBuffer); - if (nread < 0) { - break; - } - toRead -= nread; - totalRead += nread; - if (allowShortReads) { - break; - } - } - } finally { - fallbackBuffer.flip(); - } - if ((toRead > 0) && (!allowShortReads)) { - throw new EOFException("only read " + totalRead + " bytes out of " + - "a requested " + toRead + " before hitting EOF"); - } - return totalRead; - } - - void setMmap(ClientMmap mmap, ByteBuffer readBuffer) { - if (this.mmap != mmap) { - if (this.mmap != null) { - this.mmap.unref(); - } - } - this.mmap = mmap; - mmap.ref(); - this.readBuffer = readBuffer; - } - - ClientMmap getMmap() { - return mmap; - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java index eab35c97821..f587c3b5d58 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java @@ -27,6 +27,7 @@ import 
org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.FSInputChecker; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.client.ClientMmap; import org.apache.hadoop.hdfs.client.ClientMmapManager; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.protocol.DatanodeID; @@ -489,9 +490,8 @@ public boolean isShortCircuit() { } @Override - public boolean readZeroCopy(HdfsZeroCopyCursor buffers, - LocatedBlock curBlock, long blockPos, int toRead, + public ClientMmap getClientMmap(LocatedBlock curBlock, ClientMmapManager mmapManager) { - return false; + return null; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java index 8c2bdf3c844..521fb70aa38 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java @@ -29,6 +29,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdfs.client.ClientMmap; import org.apache.hadoop.hdfs.client.ClientMmapManager; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.protocol.DatanodeID; @@ -454,9 +455,8 @@ public boolean isShortCircuit() { } @Override - public boolean readZeroCopy(HdfsZeroCopyCursor buffers, - LocatedBlock curBlock, long blockPos, int toRead, + public ClientMmap getClientMmap(LocatedBlock curBlock, ClientMmapManager manager) { - return false; + return null; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ClientMmapManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ClientMmapManager.java index 7be519439e8..856e586e8e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ClientMmapManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/ClientMmapManager.java @@ -361,6 +361,10 @@ private ClientMmap create(Key key, FileInputStream in) throws IOException { } waitable.provide(mmap); } + if (LOG.isDebugEnabled()) { + LOG.info("created a new ClientMmap for block " + key.block + + " on datanode " + key.datanode); + } return mmap; } @@ -403,8 +407,10 @@ public ClientMmap fetch(DatanodeID datanodeID, ExtendedBlock block, finally { lock.unlock(); } - LOG.debug("reusing existing mmap with datanodeID=" + datanodeID + + if (LOG.isDebugEnabled()) { + LOG.debug("reusing existing mmap with datanodeID=" + datanodeID + ", " + "block=" + block); + } return mmap; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c index 39761b5a03d..6b80ea90a20 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c @@ -32,6 +32,22 @@ int expectFileStats(hdfsFile file, { struct hdfsReadStatistics *stats = NULL; EXPECT_ZERO(hdfsFileGetReadStatistics(file, &stats)); + fprintf(stderr, "expectFileStats(expectedTotalBytesRead=%"PRId64", " + "expectedTotalLocalBytesRead=%"PRId64", " + "expectedTotalShortCircuitBytesRead=%"PRId64", " + "expectedTotalZeroCopyBytesRead=%"PRId64", " + "totalBytesRead=%"PRId64", " + "totalLocalBytesRead=%"PRId64", " + 
"totalShortCircuitBytesRead=%"PRId64", " + "totalZeroCopyBytesRead=%"PRId64")\n", + expectedTotalBytesRead, + expectedTotalLocalBytesRead, + expectedTotalShortCircuitBytesRead, + expectedTotalZeroCopyBytesRead, + stats->totalBytesRead, + stats->totalLocalBytesRead, + stats->totalShortCircuitBytesRead, + stats->totalZeroCopyBytesRead); if (expectedTotalBytesRead != UINT64_MAX) { EXPECT_INT64_EQ(expectedTotalBytesRead, stats->totalBytesRead); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c index 66799c82c43..b1f9bc8a79d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c @@ -39,7 +39,7 @@ #define JAVA_NET_ISA "java/net/InetSocketAddress" #define JAVA_NET_URI "java/net/URI" #define JAVA_STRING "java/lang/String" -#define HADOOP_ZERO_COPY_CURSOR "org/apache/hadoop/fs/ZeroCopyCursor" +#define READ_OPTION "org/apache/hadoop/fs/ReadOption" #define JAVA_VOID "V" @@ -2103,151 +2103,258 @@ int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime) return 0; } -struct hadoopZeroCopyCursor* hadoopZeroCopyCursorAlloc(hdfsFile file) +/** + * Zero-copy options. + * + * We cache the EnumSet of ReadOptions which has to be passed into every + * readZero call, to avoid reconstructing it each time. This cache is cleared + * whenever an element changes. + */ +struct hadoopRzOptions { - int ret; - jobject zcursor = NULL; - jvalue jVal; - jthrowable jthr; - JNIEnv* env; + JNIEnv *env; + int skipChecksums; + jobject byteBufferPool; + jobject cachedEnumSet; +}; + +struct hadoopRzOptions *hadoopRzOptionsAlloc(void) +{ + struct hadoopRzOptions *opts; + JNIEnv *env; env = getJNIEnv(); - if (env == NULL) { + if (!env) { + // Check to make sure the JNI environment is set up properly. 
errno = EINTERNAL; return NULL; } - if (file->type != INPUT) { - ret = EINVAL; - goto done; + opts = calloc(1, sizeof(struct hadoopRzOptions)); + if (!opts) { + errno = ENOMEM; + return NULL; } - jthr = invokeMethod(env, &jVal, INSTANCE, (jobject)file->file, HADOOP_ISTRM, - "createZeroCopyCursor", "()L"HADOOP_ZERO_COPY_CURSOR";"); - if (jthr) { - ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, - "hadoopZeroCopyCursorAlloc: createZeroCopyCursor"); - goto done; - } - zcursor = (*env)->NewGlobalRef(env, jVal.l); - if (!zcursor) { - ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL, - "hadoopZeroCopyCursorAlloc: NewGlobalRef"); - } - ret = 0; -done: - if (ret) { - errno = ret; - } - return (struct hadoopZeroCopyCursor*)zcursor; + return opts; } -int hadoopZeroCopyCursorSetFallbackBuffer(struct hadoopZeroCopyCursor* zcursor, - void *cbuf, uint32_t size) +static void hadoopRzOptionsClearCached(JNIEnv *env, + struct hadoopRzOptions *opts) { - int ret; - jobject buffer = NULL; - jthrowable jthr; - JNIEnv* env; - - env = getJNIEnv(); - if (env == NULL) { - errno = EINTERNAL; - return -1; - } - buffer = (*env)->NewDirectByteBuffer(env, cbuf, size); - if (!buffer) { - ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL, - "hadoopZeroCopyCursorSetFallbackBuffer: NewDirectByteBuffer(" - "size=%"PRId32"):", size); - goto done; - } - jthr = invokeMethod(env, NULL, INSTANCE, (jobject)zcursor, - HADOOP_ZERO_COPY_CURSOR, "setFallbackBuffer", - "(Ljava/nio/ByteBuffer;)V", buffer); - if (jthr) { - ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, - "hadoopZeroCopyCursorSetFallbackBuffer: " - "FileSystem#setFallbackBuffer"); - goto done; - } - ret = 0; -done: - if (ret) { - (*env)->DeleteLocalRef(env, buffer); - errno = ret; - return -1; - } - return 0; -} - -int hadoopZeroCopyCursorSetSkipChecksums(struct hadoopZeroCopyCursor* zcursor, - int skipChecksums) -{ - JNIEnv* env; - jthrowable jthr; - jboolean shouldSkipChecksums = skipChecksums ? JNI_TRUE : JNI_FALSE; - - env = getJNIEnv(); - if (env == NULL) { - errno = EINTERNAL; - return -1; - } - jthr = invokeMethod(env, NULL, INSTANCE, (jobject)zcursor, - HADOOP_ZERO_COPY_CURSOR, "setSkipChecksums", "(Z)V", - shouldSkipChecksums); - if (jthr) { - errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, - "hadoopZeroCopyCursorSetSkipChecksums(): setSkipChecksums failed"); - return -1; - } - return 0; -} - -int hadoopZeroCopyCursorSetAllowShortReads( - struct hadoopZeroCopyCursor* zcursor, int allowShort) -{ - JNIEnv* env; - jthrowable jthr; - jboolean shouldAllowShort = allowShort ? 
JNI_TRUE : JNI_FALSE; - - env = getJNIEnv(); - if (env == NULL) { - errno = EINTERNAL; - return -1; - } - jthr = invokeMethod(env, NULL, INSTANCE, (jobject)zcursor, - HADOOP_ZERO_COPY_CURSOR, "setAllowShortReads", "(Z)V", - shouldAllowShort); - if (jthr) { - errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, - "hadoopZeroCopyCursorSetAllowShortReads(): setAllowShortReads " - "failed"); - return -1; - } - return 0; -} - -void hadoopZeroCopyCursorFree(struct hadoopZeroCopyCursor *zcursor) -{ - JNIEnv* env; - jthrowable jthr; - - env = getJNIEnv(); - if (env == NULL) { + if (!opts->cachedEnumSet) { return; } - jthr = invokeMethod(env, NULL, INSTANCE, (jobject)zcursor, - HADOOP_ZERO_COPY_CURSOR, "close", "()V"); - if (jthr) { - printExceptionAndFree(env, jthr, PRINT_EXC_ALL, - "hadoopZeroCopyCursorFree(): close failed"); - } - (*env)->DeleteGlobalRef(env, (jobject)zcursor); + (*env)->DeleteGlobalRef(env, opts->cachedEnumSet); + opts->cachedEnumSet = NULL; +} + +int hadoopRzOptionsSetSkipChecksum( + struct hadoopRzOptions *opts, int skip) +{ + JNIEnv *env; + env = getJNIEnv(); + if (!env) { + errno = EINTERNAL; + return -1; + } + hadoopRzOptionsClearCached(env, opts); + opts->skipChecksums = !!skip; + return 0; +} + +int hadoopRzOptionsSetByteBufferPool( + struct hadoopRzOptions *opts, const char *className) +{ + JNIEnv *env; + jthrowable jthr; + jobject byteBufferPool = NULL; + + env = getJNIEnv(); + if (!env) { + errno = EINTERNAL; + return -1; + } + + // Note: we don't have to call hadoopRzOptionsClearCached in this + // function, since the ByteBufferPool is passed separately from the + // EnumSet of ReadOptions. + + jthr = constructNewObjectOfClass(env, &byteBufferPool, className, "()V"); + if (jthr) { + printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "hadoopRzOptionsSetByteBufferPool(className=%s): ", className); + errno = EINVAL; + return -1; + } + if (opts->byteBufferPool) { + // Delete any previous ByteBufferPool we had. + (*env)->DeleteGlobalRef(env, opts->byteBufferPool); + } + opts->byteBufferPool = byteBufferPool; + return 0; +} + +void hadoopRzOptionsFree(struct hadoopRzOptions *opts) +{ + JNIEnv *env; + env = getJNIEnv(); + if (!env) { + return; + } + hadoopRzOptionsClearCached(env, opts); + if (opts->byteBufferPool) { + (*env)->DeleteGlobalRef(env, opts->byteBufferPool); + opts->byteBufferPool = NULL; + } + free(opts); +} + +struct hadoopRzBuffer +{ + jobject byteBuffer; + uint8_t *ptr; + int32_t length; + int direct; +}; + +static jthrowable hadoopRzOptionsGetEnumSet(JNIEnv *env, + struct hadoopRzOptions *opts, jobject *enumSet) +{ + jthrowable jthr = NULL; + jobject enumInst = NULL, enumSetObj = NULL; + jvalue jVal; + + if (opts->cachedEnumSet) { + // If we cached the value, return it now. 
+ *enumSet = opts->cachedEnumSet; + goto done; + } + if (opts->skipChecksums) { + jthr = fetchEnumInstance(env, READ_OPTION, + "SKIP_CHECKSUMS", &enumInst); + if (jthr) { + goto done; + } + jthr = invokeMethod(env, &jVal, STATIC, NULL, + "java/util/EnumSet", "of", + "(Ljava/lang/Enum;)Ljava/util/EnumSet;", enumInst); + if (jthr) { + goto done; + } + enumSetObj = jVal.l; + } else { + jclass clazz = (*env)->FindClass(env, READ_OPTION); + if (!clazz) { + jthr = newRuntimeError(env, "failed " + "to find class for %s", READ_OPTION); + goto done; + } + jthr = invokeMethod(env, &jVal, STATIC, NULL, + "java/util/EnumSet", "noneOf", + "(Ljava/lang/Class;)Ljava/util/EnumSet;", clazz); + enumSetObj = jVal.l; + } + // create global ref + opts->cachedEnumSet = (*env)->NewGlobalRef(env, enumSetObj); + if (!opts->cachedEnumSet) { + jthr = getPendingExceptionAndClear(env); + goto done; + } + *enumSet = opts->cachedEnumSet; + jthr = NULL; +done: + (*env)->DeleteLocalRef(env, enumInst); + (*env)->DeleteLocalRef(env, enumSetObj); + return jthr; +} + +static int hadoopReadZeroExtractBuffer(JNIEnv *env, + const struct hadoopRzOptions *opts, struct hadoopRzBuffer *buffer) +{ + int ret; + jthrowable jthr; + jvalue jVal; + uint8_t *directStart; + void *mallocBuf = NULL; + jint position; + jarray array = NULL; + + jthr = invokeMethod(env, &jVal, INSTANCE, buffer->byteBuffer, + "java/nio/ByteBuffer", "remaining", "()I"); + if (jthr) { + ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "hadoopReadZeroExtractBuffer: ByteBuffer#remaining failed: "); + goto done; + } + buffer->length = jVal.i; + jthr = invokeMethod(env, &jVal, INSTANCE, buffer->byteBuffer, + "java/nio/ByteBuffer", "position", "()I"); + if (jthr) { + ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "hadoopReadZeroExtractBuffer: ByteBuffer#position failed: "); + goto done; + } + position = jVal.i; + directStart = (*env)->GetDirectBufferAddress(env, buffer->byteBuffer); + if (directStart) { + // Handle direct buffers. + buffer->ptr = directStart + position; + buffer->direct = 1; + ret = 0; + goto done; + } + // Handle indirect buffers. + // The JNI docs don't say that GetDirectBufferAddress throws any exceptions + // when it fails. However, they also don't clearly say that it doesn't. It + // seems safest to clear any pending exceptions here, to prevent problems on + // various JVMs. + (*env)->ExceptionClear(env); + if (!opts->byteBufferPool) { + fputs("hadoopReadZeroExtractBuffer: we read through the " + "zero-copy path, but failed to get the address of the buffer via " + "GetDirectBufferAddress. Please make sure your JVM supports " + "GetDirectBufferAddress.\n", stderr); + ret = ENOTSUP; + goto done; + } + // Get the backing array object of this buffer. 
+ jthr = invokeMethod(env, &jVal, INSTANCE, buffer->byteBuffer, + "java/nio/ByteBuffer", "array", "()[B"); + if (jthr) { + ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "hadoopReadZeroExtractBuffer: ByteBuffer#array failed: "); + goto done; + } + array = jVal.l; + if (!array) { + fputs("hadoopReadZeroExtractBuffer: ByteBuffer#array returned NULL.", + stderr); + ret = EIO; + goto done; + } + mallocBuf = malloc(buffer->length); + if (!mallocBuf) { + fprintf(stderr, "hadoopReadZeroExtractBuffer: failed to allocate %d bytes of memory\n", + buffer->length); + ret = ENOMEM; + goto done; + } + (*env)->GetByteArrayRegion(env, array, position, buffer->length, mallocBuf); + jthr = (*env)->ExceptionOccurred(env); + if (jthr) { + ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "hadoopReadZeroExtractBuffer: GetByteArrayRegion failed: "); + goto done; + } + buffer->ptr = mallocBuf; + buffer->direct = 0; + ret = 0; + +done: + free(mallocBuf); + (*env)->DeleteLocalRef(env, array); + return ret; } -/** - * Translate an exception from ZeroCopyCursor#read, translate it into a return - * code. - */ static int translateZCRException(JNIEnv *env, jthrowable exc) { int ret; @@ -2255,16 +2362,12 @@ static int translateZCRException(JNIEnv *env, jthrowable exc) jthrowable jthr = classNameOfObject(exc, env, &className); if (jthr) { - fprintf(stderr, "hadoopZeroCopyRead: unknown " - "exception from read().\n"); - destroyLocalReference(env, jthr); + fputs("hadoopReadZero: failed to get class name of " + "exception from read().\n", stderr); + destroyLocalReference(env, exc); destroyLocalReference(env, jthr); ret = EIO; goto done; - } - if (!strcmp(className, "java.io.EOFException")) { - ret = 0; // EOF - goto done; } if (!strcmp(className, "java.lang.UnsupportedOperationException")) { ret = EPROTONOSUPPORT; @@ -2277,72 +2380,116 @@ done: return ret; } -int32_t hadoopZeroCopyRead(struct hadoopZeroCopyCursor *zcursor, - int32_t toRead, const void **data) +struct hadoopRzBuffer* hadoopReadZero(hdfsFile file, + struct hadoopRzOptions *opts, int32_t maxLength) { - int32_t ret, nRead = -1; - JNIEnv* env; - jthrowable jthr; - jobject byteBuffer = NULL; - uint8_t *addr; - jint position; + JNIEnv *env; + jthrowable jthr = NULL; jvalue jVal; - + jobject enumSet = NULL, byteBuffer = NULL; + struct hadoopRzBuffer* buffer = NULL; + int ret; + env = getJNIEnv(); - if (env == NULL) { + if (!env) { errno = EINTERNAL; - return -1; + return NULL; } - jthr = invokeMethod(env, NULL, INSTANCE, (jobject)zcursor, - HADOOP_ZERO_COPY_CURSOR, "read", "(I)V", toRead); + if (file->type != INPUT) { + fputs("Cannot read from a non-InputStream object!\n", stderr); + ret = EINVAL; + goto done; + } + buffer = calloc(1, sizeof(struct hadoopRzBuffer)); + if (!buffer) { + ret = ENOMEM; + goto done; + } + jthr = hadoopRzOptionsGetEnumSet(env, opts, &enumSet); + if (jthr) { + ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "hadoopReadZero: hadoopRzOptionsGetEnumSet failed: "); + goto done; + } + jthr = invokeMethod(env, &jVal, INSTANCE, file->file, HADOOP_ISTRM, "read", + "(Lorg/apache/hadoop/io/ByteBufferPool;ILjava/util/EnumSet;)" + "Ljava/nio/ByteBuffer;", opts->byteBufferPool, maxLength, enumSet); if (jthr) { ret = translateZCRException(env, jthr); goto done; } - jthr = invokeMethod(env, &jVal, INSTANCE, (jobject)zcursor, - HADOOP_ZERO_COPY_CURSOR, "getData", - "()Ljava/nio/ByteBuffer;"); - if (jthr) { - ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, - "hadoopZeroCopyRead(toRead=%"PRId32"): getData failed", - toRead); - 
goto done; - } byteBuffer = jVal.l; - addr = (*env)->GetDirectBufferAddress(env, byteBuffer); - if (!addr) { - fprintf(stderr, "hadoopZeroCopyRead(toRead=%"PRId32"): " - "failed to get direct buffer address.\n", toRead); - ret = EIO; - goto done; - } - jthr = invokeMethod(env, &jVal, INSTANCE, byteBuffer, - "java/nio/ByteBuffer", "position", "()I"); - if (jthr) { - ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, - "hadoopZeroCopyRead(toRead=%"PRId32"): ByteBuffer#position " - "failed", toRead); - goto done; - } - position = jVal.i; - jthr = invokeMethod(env, &jVal, INSTANCE, byteBuffer, - "java/nio/ByteBuffer", "remaining", "()I"); - if (jthr) { - ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL, - "hadoopZeroCopyRead(toRead=%"PRId32"): ByteBuffer#remaining " - "failed", toRead); - goto done; + if (!byteBuffer) { + buffer->byteBuffer = NULL; + buffer->length = 0; + buffer->ptr = NULL; + } else { + buffer->byteBuffer = (*env)->NewGlobalRef(env, byteBuffer); + if (!buffer->byteBuffer) { + ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL, + "hadoopReadZero: failed to create global ref to ByteBuffer"); + goto done; + } + ret = hadoopReadZeroExtractBuffer(env, opts, buffer); + if (ret) { + goto done; + } } ret = 0; - nRead = jVal.i; - *data = addr + position; done: (*env)->DeleteLocalRef(env, byteBuffer); - if (nRead == -1) { + if (ret) { + if (buffer) { + if (buffer->byteBuffer) { + (*env)->DeleteGlobalRef(env, buffer->byteBuffer); + } + free(buffer); + } errno = ret; - return -1; + return NULL; + } else { + errno = 0; } - return nRead; + return buffer; +} + +int32_t hadoopRzBufferLength(const struct hadoopRzBuffer *buffer) +{ + return buffer->length; +} + +const void *hadoopRzBufferGet(const struct hadoopRzBuffer *buffer) +{ + return buffer->ptr; +} + +void hadoopRzBufferFree(hdfsFile file, struct hadoopRzBuffer *buffer) +{ + jvalue jVal; + jthrowable jthr; + JNIEnv* env; + + env = getJNIEnv(); + if (env == NULL) { + errno = EINTERNAL; + return; + } + if (buffer->byteBuffer) { + jthr = invokeMethod(env, &jVal, INSTANCE, file->file, + HADOOP_ISTRM, "releaseBuffer", + "(Ljava/nio/ByteBuffer;)V", buffer->byteBuffer); + if (jthr) { + printExceptionAndFree(env, jthr, PRINT_EXC_ALL, + "hadoopRzBufferFree: releaseBuffer failed: "); + // even on error, we have to delete the reference. + } + (*env)->DeleteGlobalRef(env, buffer->byteBuffer); + } + if (!buffer->direct) { + free(buffer->ptr); + } + memset(buffer, 0, sizeof(*buffer)); + free(buffer); } char*** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h index 69fad082b69..f9f3840d471 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h @@ -36,6 +36,8 @@ #define EINTERNAL 255 #endif +#define ELASTIC_BYTE_BUFFER_POOL_CLASS \ + "org/apache/hadoop/io/ElasticByteBufferPool" /** All APIs set errno to meaningful values */ @@ -65,6 +67,10 @@ extern "C" { struct hdfsFile_internal; typedef struct hdfsFile_internal* hdfsFile; + struct hadoopRzOptions; + + struct hadoopRzBuffer; + /** * Determine if a file is open for read. * @@ -683,86 +689,104 @@ extern "C" { int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime); /** - * Create a zero-copy cursor object. + * Allocate a zero-copy options structure. * - * @param file The file to use for zero-copy reads. + * You must free all options structures allocated with this function using + * hadoopRzOptionsFree. 
* - * @return The zero-copy cursor, or NULL + errno on failure. + * @return A zero-copy options structure, or NULL if one could + * not be allocated. If NULL is returned, errno will + * contain the error number. */ - struct hadoopZeroCopyCursor* hadoopZeroCopyCursorAlloc(hdfsFile file); + struct hadoopRzOptions *hadoopRzOptionsAlloc(void); /** - * Set the fallback buffer which will be used by the zero copy object. + * Determine whether we should skip checksums in read0. * - * You are responsible for ensuring that this buffer stays valid until you - * either set a different buffer by calling this function again, or free the - * zero-copy cursor. + * @param opts The options structure. + * @param skip Nonzero to skip checksums sometimes; zero to always + * check them. * - * @param zcursor The zero-copy cursor. - * @param cbuf The buffer to use. - * @param size Size of the buffer. - * - * @return 0 on success. -1 on error. Errno will be set on - * error. + * @return 0 on success; -1 plus errno on failure. */ - int hadoopZeroCopyCursorSetFallbackBuffer( - struct hadoopZeroCopyCursor* zcursor, void *cbuf, uint32_t size); + int hadoopRzOptionsSetSkipChecksum( + struct hadoopRzOptions *opts, int skip); /** - * Set whether our cursor should skip checksums or not. + * Set the ByteBufferPool to use with read0. * - * @param zcursor The cursor - * @param skipChecksums Nonzero to skip checksums. + * @param opts The options structure. + * @param className If this is NULL, we will not use any + * ByteBufferPool. If this is non-NULL, it will be + * treated as the name of the pool class to use. + * For example, you can use + * ELASTIC_BYTE_BUFFER_POOL_CLASS. * - * @return -1 on error, 0 otherwise. + * @return 0 if the ByteBufferPool class was found and + * instantiated; + * -1 plus errno otherwise. */ - int hadoopZeroCopyCursorSetSkipChecksums( - struct hadoopZeroCopyCursor* zcursor, int skipChecksums); + int hadoopRzOptionsSetByteBufferPool( + struct hadoopRzOptions *opts, const char *className); /** - * Set whether our cursor should allow short reads to occur. - * Short reads will always occur if there is not enough data to read - * (i.e., at EOF), but normally we don't return them when reading other - * parts of the file. + * Free a hadoopRzOptionsFree structure. * - * @param zcursor The cursor - * @param skipChecksums Nonzero to skip checksums. - * - * @return -1 on error, 0 otherwise. + * @param opts The options structure to free. + * Any associated ByteBufferPool will also be freed. */ - int hadoopZeroCopyCursorSetAllowShortReads( - struct hadoopZeroCopyCursor* zcursor, int allowShort); + void hadoopRzOptionsFree(struct hadoopRzOptions *opts); /** - * Free zero-copy cursor. + * Perform a byte buffer read. + * If possible, this will be a zero-copy (mmap) read. * - * This will dispose of the cursor allocated by hadoopZeroCopyCursorAlloc, as - * well as any memory map that we have created. You must be done with the - * data returned from hadoopZeroCopyRead before calling this. + * @param file The file to read from. + * @param opts An options structure created by hadoopRzOptionsAlloc. + * @param maxLength The maximum length to read. We may read fewer bytes + * than this length. * - * @param zcursor The zero-copy cursor. + * @return On success, returns a new hadoopRzBuffer. + * This buffer will continue to be valid and readable + * until it is released by readZeroBufferFree. Failure to + * release a buffer will lead to a memory leak. + * + * NULL plus an errno code on an error. 
+ * errno = EOPNOTSUPP indicates that we could not do a + * zero-copy read, and there was no ByteBufferPool + * supplied. */ - void hadoopZeroCopyCursorFree(struct hadoopZeroCopyCursor *zcursor); + struct hadoopRzBuffer* hadoopReadZero(hdfsFile file, + struct hadoopRzOptions *opts, int32_t maxLength); - /* - * Perform a zero-copy read. + /** + * Determine the length of the buffer returned from readZero. * - * @param zcursor The zero-copy cursor object. - * @param toRead The maximum amount to read. - * @param data (out param) on succesful return, a pointer to the - * data. This pointer will remain valid until the next - * call to hadoopZeroCopyRead, or until - * hadoopZeroCopyCursorFree is called on zcursor. - * - * @return -2 if zero-copy is unavailable, and - * -1 if there was an error. errno will be the error. - * 0 if we hit end-of-file without reading anything. - * The positive number of bytes read otherwise. Short - * reads will happen only if EOF is reached. - * The amount read otherwise. + * @param buffer a buffer returned from readZero. + * @return the length of the buffer. */ - int32_t hadoopZeroCopyRead(struct hadoopZeroCopyCursor *zcursor, - int32_t toRead, const void **data); + int32_t hadoopRzBufferLength(const struct hadoopRzBuffer *buffer); + + /** + * Get a pointer to the raw buffer returned from readZero. + * + * To find out how many bytes this buffer contains, call + * hadoopRzBufferLength. + * + * @param buffer a buffer returned from readZero. + * @return a pointer to the start of the buffer. This will be + * NULL when end-of-file has been reached. + */ + const void *hadoopRzBufferGet(const struct hadoopRzBuffer *buffer); + + /** + * Release a buffer obtained through readZero. + * + * @param file The hdfs stream that created this buffer. This must be + * the same stream you called hadoopReadZero on. + * @param buffer The buffer to release. 
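+/*
+ * Editor's note (not part of the patch): a minimal usage sketch of the
+ * hadoopRz* API declared in this header, written only against the
+ * signatures and return conventions documented above. The filesystem
+ * handle `fs`, the path "/a", the 4096-byte read length, and the helper
+ * name readOneBuffer are illustrative assumptions; the usual libhdfs
+ * includes (<hdfs.h>, <errno.h>, <fcntl.h>, <stdio.h>) are assumed.
+ * The fallback branch mirrors the zero-copy test in this patch: with no
+ * ByteBufferPool configured, a read that cannot be served zero-copy fails
+ * and errno is set to EPROTONOSUPPORT (the implementation's
+ * translateZCRException maps UnsupportedOperationException to that value,
+ * even though the comment above says EOPNOTSUPP), after which a pool such
+ * as ELASTIC_BYTE_BUFFER_POOL_CLASS can be supplied and the read retried.
+ * A single options structure can be reused across many reads, since the
+ * EnumSet it builds internally is cached until an option changes.
+ */
+static int readOneBuffer(hdfsFS fs)
+{
+    struct hadoopRzOptions *opts = NULL;
+    struct hadoopRzBuffer *buf = NULL;
+    hdfsFile file = NULL;
+    int ret = -1;
+
+    file = hdfsOpenFile(fs, "/a", O_RDONLY, 0, 0, 0);
+    if (!file)
+        goto done;
+    opts = hadoopRzOptionsAlloc();
+    if (!opts)
+        goto done;
+    /* Skip checksums so a short-circuit mmap read is possible. */
+    if (hadoopRzOptionsSetSkipChecksum(opts, 1))
+        goto done;
+    buf = hadoopReadZero(file, opts, 4096);
+    if (!buf && errno == EPROTONOSUPPORT) {
+        /* Zero-copy was not possible and no pool was set; fall back. */
+        if (hadoopRzOptionsSetByteBufferPool(opts,
+                ELASTIC_BYTE_BUFFER_POOL_CLASS))
+            goto done;
+        buf = hadoopReadZero(file, opts, 4096);
+    }
+    if (!buf)
+        goto done;
+    /* hadoopRzBufferGet returns NULL once end-of-file is reached. */
+    if (hadoopRzBufferGet(buf)) {
+        fprintf(stderr, "read %d bytes\n", (int)hadoopRzBufferLength(buf));
+    }
+    ret = 0;
+done:
+    if (buf)
+        hadoopRzBufferFree(file, buf);
+    if (opts)
+        hadoopRzOptionsFree(opts);
+    if (file)
+        hdfsCloseFile(fs, file);
+    return ret;
+}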
+ */ + void hadoopRzBufferFree(hdfsFile file, struct hadoopRzBuffer *buffer); #ifdef __cplusplus } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c index 21ff9d9e0da..878289f96d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c @@ -647,3 +647,34 @@ done: (*env)->DeleteLocalRef(env, jvalue); return jthr; } + +jthrowable fetchEnumInstance(JNIEnv *env, const char *className, + const char *valueName, jobject *out) +{ + jclass clazz; + jfieldID fieldId; + jobject jEnum; + char prettyClass[256]; + + clazz = (*env)->FindClass(env, className); + if (!clazz) { + return newRuntimeError(env, "fetchEnum(%s, %s): failed to find class.", + className, valueName); + } + if (snprintf(prettyClass, sizeof(prettyClass), "L%s;", className) + >= sizeof(prettyClass)) { + return newRuntimeError(env, "fetchEnum(%s, %s): class name too long.", + className, valueName); + } + fieldId = (*env)->GetStaticFieldID(env, clazz, valueName, prettyClass); + if (!fieldId) { + return getPendingExceptionAndClear(env); + } + jEnum = (*env)->GetStaticObjectField(env, clazz, fieldId); + if (!jEnum) { + return getPendingExceptionAndClear(env); + } + *out = jEnum; + return NULL; +} + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h index c2a7409a9c4..c09f6a38cbb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h @@ -140,6 +140,21 @@ int javaObjectIsOfClass(JNIEnv *env, jobject obj, const char *name); jthrowable hadoopConfSetStr(JNIEnv *env, jobject jConfiguration, const char *key, const char *value); +/** + * Fetch an instance of an Enum. + * + * @param env The JNI environment. + * @param className The enum class name. + * @param valueName The name of the enum value + * @param out (out param) on success, a local reference to an + * instance of the enum object. (Since Java enums are + * singletones, this is also the only instance.) 
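+/*
+ * Editor's note (not part of the patch): a brief, hedged sketch of how a
+ * caller is expected to use fetchEnumInstance, whose documentation and
+ * declaration continue just below. It mirrors the call made by
+ * hadoopRzOptionsGetEnumSet in hdfs.c: the class name is given in slash
+ * form, failure is reported by a non-NULL jthrowable, and the out
+ * parameter receives a local reference the caller must eventually delete.
+ * The `env` variable is assumed to be a valid JNIEnv pointer.
+ */
+    jobject enumInst = NULL;
+    jthrowable jthr;
+
+    jthr = fetchEnumInstance(env, "org/apache/hadoop/fs/ReadOption",
+        "SKIP_CHECKSUMS", &enumInst);
+    if (jthr) {
+        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "failed to fetch ReadOption#SKIP_CHECKSUMS: ");
+        return;
+    }
+    /* ... use enumInst (e.g. to build an EnumSet), then release it. */
+    (*env)->DeleteLocalRef(env, enumInst);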
+ * + * @return NULL on success; exception otherwise + */ +jthrowable fetchEnumInstance(JNIEnv *env, const char *className, + const char *valueName, jobject *out); + #endif /*LIBHDFS_JNI_HELPER_H*/ /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c index 0b34540ba95..b22fee12964 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c @@ -81,40 +81,49 @@ static void printBuf(const uint8_t *buf, size_t len) static int doTestZeroCopyReads(hdfsFS fs, const char *fileName) { hdfsFile file = NULL; - struct hadoopZeroCopyCursor *zcursor = NULL; - uint8_t *backingBuffer = NULL, *block; - const void *zcPtr; + struct hadoopRzOptions *opts = NULL; + struct hadoopRzBuffer *buffer = NULL; + uint8_t *block; file = hdfsOpenFile(fs, fileName, O_RDONLY, 0, 0, 0); EXPECT_NONNULL(file); - zcursor = hadoopZeroCopyCursorAlloc(file); - EXPECT_NONNULL(zcursor); + opts = hadoopRzOptionsAlloc(); + EXPECT_NONNULL(opts); + EXPECT_ZERO(hadoopRzOptionsSetSkipChecksum(opts, 1)); /* haven't read anything yet */ EXPECT_ZERO(expectFileStats(file, 0LL, 0LL, 0LL, 0LL)); block = getZeroCopyBlockData(0); EXPECT_NONNULL(block); /* first read is half of a block. */ + buffer = hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2); + EXPECT_NONNULL(buffer); EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2, - hadoopZeroCopyRead(zcursor, - TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2, &zcPtr)); - EXPECT_ZERO(memcmp(zcPtr, block, TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2)); + hadoopRzBufferLength(buffer)); + EXPECT_ZERO(memcmp(hadoopRzBufferGet(buffer), block, + TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2)); + hadoopRzBufferFree(file, buffer); /* read the next half of the block */ + buffer = hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2); + EXPECT_NONNULL(buffer); EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2, - hadoopZeroCopyRead(zcursor, - TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2, &zcPtr)); - EXPECT_ZERO(memcmp(zcPtr, block + (TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2), - TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2)); + hadoopRzBufferLength(buffer)); + EXPECT_ZERO(memcmp(hadoopRzBufferGet(buffer), + block + (TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2), + TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2)); + hadoopRzBufferFree(file, buffer); free(block); EXPECT_ZERO(expectFileStats(file, TEST_ZEROCOPY_FULL_BLOCK_SIZE, TEST_ZEROCOPY_FULL_BLOCK_SIZE, TEST_ZEROCOPY_FULL_BLOCK_SIZE, TEST_ZEROCOPY_FULL_BLOCK_SIZE)); /* Now let's read just a few bytes. */ - EXPECT_INT_EQ(SMALL_READ_LEN, - hadoopZeroCopyRead(zcursor, SMALL_READ_LEN, &zcPtr)); + buffer = hadoopReadZero(file, opts, SMALL_READ_LEN); + EXPECT_NONNULL(buffer); + EXPECT_INT_EQ(SMALL_READ_LEN, hadoopRzBufferLength(buffer)); block = getZeroCopyBlockData(1); EXPECT_NONNULL(block); - EXPECT_ZERO(memcmp(block, zcPtr, SMALL_READ_LEN)); + EXPECT_ZERO(memcmp(block, hadoopRzBufferGet(buffer), SMALL_READ_LEN)); + hadoopRzBufferFree(file, buffer); EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN, hdfsTell(fs, file)); EXPECT_ZERO(expectFileStats(file, @@ -123,37 +132,36 @@ static int doTestZeroCopyReads(hdfsFS fs, const char *fileName) TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN, TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN)); - /* Try to read a full block's worth of data. 
This will cross the block - * boundary, which means we have to fall back to non-zero-copy reads. - * However, because we don't have a backing buffer, the fallback will fail - * with EPROTONOSUPPORT. */ - EXPECT_INT_EQ(-1, - hadoopZeroCopyRead(zcursor, TEST_ZEROCOPY_FULL_BLOCK_SIZE, &zcPtr)); + /* Clear 'skip checksums' and test that we can't do zero-copy reads any + * more. Since there is no ByteBufferPool set, we should fail with + * EPROTONOSUPPORT. + */ + EXPECT_ZERO(hadoopRzOptionsSetSkipChecksum(opts, 0)); + EXPECT_NULL(hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE)); EXPECT_INT_EQ(EPROTONOSUPPORT, errno); - /* Now set a backing buffer and try again. It should succeed this time. */ - backingBuffer = malloc(ZC_BUF_LEN); - EXPECT_NONNULL(backingBuffer); - EXPECT_ZERO(hadoopZeroCopyCursorSetFallbackBuffer(zcursor, - backingBuffer, ZC_BUF_LEN)); - EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE, - hadoopZeroCopyRead(zcursor, TEST_ZEROCOPY_FULL_BLOCK_SIZE, &zcPtr)); + /* Now set a ByteBufferPool and try again. It should succeed this time. */ + EXPECT_ZERO(hadoopRzOptionsSetByteBufferPool(opts, + ELASTIC_BYTE_BUFFER_POOL_CLASS)); + buffer = hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE); + EXPECT_NONNULL(buffer); + EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE, hadoopRzBufferLength(buffer)); EXPECT_ZERO(expectFileStats(file, (2 * TEST_ZEROCOPY_FULL_BLOCK_SIZE) + SMALL_READ_LEN, (2 * TEST_ZEROCOPY_FULL_BLOCK_SIZE) + SMALL_READ_LEN, (2 * TEST_ZEROCOPY_FULL_BLOCK_SIZE) + SMALL_READ_LEN, TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN)); - EXPECT_ZERO(memcmp(block + SMALL_READ_LEN, zcPtr, + EXPECT_ZERO(memcmp(block + SMALL_READ_LEN, hadoopRzBufferGet(buffer), TEST_ZEROCOPY_FULL_BLOCK_SIZE - SMALL_READ_LEN)); free(block); block = getZeroCopyBlockData(2); EXPECT_NONNULL(block); - EXPECT_ZERO(memcmp(block, zcPtr + + EXPECT_ZERO(memcmp(block, hadoopRzBufferGet(buffer) + (TEST_ZEROCOPY_FULL_BLOCK_SIZE - SMALL_READ_LEN), SMALL_READ_LEN)); + hadoopRzBufferFree(file, buffer); free(block); - hadoopZeroCopyCursorFree(zcursor); + hadoopRzOptionsFree(opts); EXPECT_ZERO(hdfsCloseFile(fs, file)); - free(backingBuffer); return 0; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java new file mode 100644 index 00000000000..c4045c35fb7 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java @@ -0,0 +1,530 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.util.concurrent.TimeoutException; +import java.util.Arrays; +import java.util.EnumSet; +import java.util.Random; + +import org.apache.commons.lang.SystemUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.client.ClientMmap; +import org.apache.hadoop.hdfs.client.ClientMmapManager; +import org.apache.hadoop.hdfs.client.HdfsDataInputStream; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.io.ByteBufferPool; +import org.apache.hadoop.io.ElasticByteBufferPool; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.nativeio.NativeIO; +import org.apache.hadoop.net.unix.DomainSocket; +import org.apache.hadoop.net.unix.TemporarySocketDirectory; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.Assert; +import org.junit.Assume; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.google.common.base.Preconditions; +import com.google.common.base.Supplier; + +/** + * This class tests if EnhancedByteBufferAccess works correctly. + */ +public class TestEnhancedByteBufferAccess { + private static final Log LOG = + LogFactory.getLog(TestEnhancedByteBufferAccess.class.getName()); + + static TemporarySocketDirectory sockDir; + + @BeforeClass + public static void init() { + sockDir = new TemporarySocketDirectory(); + DomainSocket.disableBindPathValidation(); + } + + private static byte[] byteBufferToArray(ByteBuffer buf) { + byte resultArray[] = new byte[buf.remaining()]; + buf.get(resultArray); + buf.flip(); + return resultArray; + } + + public static HdfsConfiguration initZeroCopyTest() { + Assume.assumeTrue(NativeIO.isAvailable()); + Assume.assumeTrue(SystemUtils.IS_OS_UNIX); + HdfsConfiguration conf = new HdfsConfiguration(); + conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true); + conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096); + conf.setInt(DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_SIZE, 3); + conf.setLong(DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS, 100); + conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, + new File(sockDir.getDir(), + "TestRequestMmapAccess._PORT.sock").getAbsolutePath()); + conf.setBoolean(DFSConfigKeys. 
+ DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, true); + return conf; + } + + @Test + public void testZeroCopyReads() throws Exception { + HdfsConfiguration conf = initZeroCopyTest(); + MiniDFSCluster cluster = null; + final Path TEST_PATH = new Path("/a"); + FSDataInputStream fsIn = null; + final int TEST_FILE_LENGTH = 12345; + + FileSystem fs = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + DFSTestUtil.createFile(fs, TEST_PATH, + TEST_FILE_LENGTH, (short)1, 7567L); + try { + DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1); + } catch (InterruptedException e) { + Assert.fail("unexpected InterruptedException during " + + "waitReplication: " + e); + } catch (TimeoutException e) { + Assert.fail("unexpected TimeoutException during " + + "waitReplication: " + e); + } + fsIn = fs.open(TEST_PATH); + byte original[] = new byte[TEST_FILE_LENGTH]; + IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH); + fsIn.close(); + fsIn = fs.open(TEST_PATH); + ByteBuffer result = fsIn.read(null, 4096, + EnumSet.of(ReadOption.SKIP_CHECKSUMS)); + Assert.assertEquals(4096, result.remaining()); + HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn; + Assert.assertEquals(4096, + dfsIn.getReadStatistics().getTotalBytesRead()); + Assert.assertEquals(4096, + dfsIn.getReadStatistics().getTotalZeroCopyBytesRead()); + Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 4096), + byteBufferToArray(result)); + fsIn.releaseBuffer(result); + } finally { + if (fsIn != null) fsIn.close(); + if (fs != null) fs.close(); + if (cluster != null) cluster.shutdown(); + } + } + + @Test + public void testShortZeroCopyReads() throws Exception { + HdfsConfiguration conf = initZeroCopyTest(); + MiniDFSCluster cluster = null; + final Path TEST_PATH = new Path("/a"); + FSDataInputStream fsIn = null; + final int TEST_FILE_LENGTH = 12345; + + FileSystem fs = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LENGTH, (short)1, 7567L); + try { + DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1); + } catch (InterruptedException e) { + Assert.fail("unexpected InterruptedException during " + + "waitReplication: " + e); + } catch (TimeoutException e) { + Assert.fail("unexpected TimeoutException during " + + "waitReplication: " + e); + } + fsIn = fs.open(TEST_PATH); + byte original[] = new byte[TEST_FILE_LENGTH]; + IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH); + fsIn.close(); + fsIn = fs.open(TEST_PATH); + + // Try to read 8192, but only get 4096 because of the block size. + HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn; + ByteBuffer result = + dfsIn.read(null, 8192, EnumSet.of(ReadOption.SKIP_CHECKSUMS)); + Assert.assertEquals(4096, result.remaining()); + Assert.assertEquals(4096, + dfsIn.getReadStatistics().getTotalBytesRead()); + Assert.assertEquals(4096, + dfsIn.getReadStatistics().getTotalZeroCopyBytesRead()); + Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 4096), + byteBufferToArray(result)); + dfsIn.releaseBuffer(result); + + // Try to read 4097, but only get 4096 because of the block size. 
+ result = + dfsIn.read(null, 4097, EnumSet.of(ReadOption.SKIP_CHECKSUMS)); + Assert.assertEquals(4096, result.remaining()); + Assert.assertArrayEquals(Arrays.copyOfRange(original, 4096, 8192), + byteBufferToArray(result)); + dfsIn.releaseBuffer(result); + } finally { + if (fsIn != null) fsIn.close(); + if (fs != null) fs.close(); + if (cluster != null) cluster.shutdown(); + } + } + + @Test + public void testZeroCopyReadsNoFallback() throws Exception { + HdfsConfiguration conf = initZeroCopyTest(); + MiniDFSCluster cluster = null; + final Path TEST_PATH = new Path("/a"); + FSDataInputStream fsIn = null; + final int TEST_FILE_LENGTH = 12345; + + FileSystem fs = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + DFSTestUtil.createFile(fs, TEST_PATH, + TEST_FILE_LENGTH, (short)1, 7567L); + try { + DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1); + } catch (InterruptedException e) { + Assert.fail("unexpected InterruptedException during " + + "waitReplication: " + e); + } catch (TimeoutException e) { + Assert.fail("unexpected TimeoutException during " + + "waitReplication: " + e); + } + fsIn = fs.open(TEST_PATH); + byte original[] = new byte[TEST_FILE_LENGTH]; + IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH); + fsIn.close(); + fsIn = fs.open(TEST_PATH); + HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn; + ByteBuffer result; + try { + result = dfsIn.read(null, 4097, EnumSet.noneOf(ReadOption.class)); + Assert.fail("expected UnsupportedOperationException"); + } catch (UnsupportedOperationException e) { + // expected + } + result = dfsIn.read(null, 4096, EnumSet.of(ReadOption.SKIP_CHECKSUMS)); + Assert.assertEquals(4096, result.remaining()); + Assert.assertEquals(4096, + dfsIn.getReadStatistics().getTotalBytesRead()); + Assert.assertEquals(4096, + dfsIn.getReadStatistics().getTotalZeroCopyBytesRead()); + Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 4096), + byteBufferToArray(result)); + } finally { + if (fsIn != null) fsIn.close(); + if (fs != null) fs.close(); + if (cluster != null) cluster.shutdown(); + } + } + + private static class CountingVisitor + implements ClientMmapManager.ClientMmapVisitor { + int count = 0; + + @Override + public void accept(ClientMmap mmap) { + count++; + } + + public void reset() { + count = 0; + } + } + + @Test + public void testZeroCopyMmapCache() throws Exception { + HdfsConfiguration conf = initZeroCopyTest(); + MiniDFSCluster cluster = null; + final Path TEST_PATH = new Path("/a"); + final int TEST_FILE_LENGTH = 16385; + final int RANDOM_SEED = 23453; + FSDataInputStream fsIn = null; + ByteBuffer results[] = { null, null, null, null, null }; + + DistributedFileSystem fs = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + DFSTestUtil.createFile(fs, TEST_PATH, + TEST_FILE_LENGTH, (short)1, RANDOM_SEED); + try { + DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1); + } catch (InterruptedException e) { + Assert.fail("unexpected InterruptedException during " + + "waitReplication: " + e); + } catch (TimeoutException e) { + Assert.fail("unexpected TimeoutException during " + + "waitReplication: " + e); + } + fsIn = fs.open(TEST_PATH); + byte original[] = new byte[TEST_FILE_LENGTH]; + IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH); + fsIn.close(); + fsIn = fs.open(TEST_PATH); + final ClientMmapManager mmapManager = fs.getClient().getMmapManager(); + 
final CountingVisitor countingVisitor = new CountingVisitor(); + mmapManager.visitMmaps(countingVisitor); + Assert.assertEquals(0, countingVisitor.count); + mmapManager.visitEvictable(countingVisitor); + Assert.assertEquals(0, countingVisitor.count); + results[0] = fsIn.read(null, 4096, + EnumSet.of(ReadOption.SKIP_CHECKSUMS)); + fsIn.seek(0); + results[1] = fsIn.read(null, 4096, + EnumSet.of(ReadOption.SKIP_CHECKSUMS)); + mmapManager.visitMmaps(countingVisitor); + Assert.assertEquals(1, countingVisitor.count); + countingVisitor.reset(); + mmapManager.visitEvictable(countingVisitor); + Assert.assertEquals(0, countingVisitor.count); + countingVisitor.reset(); + + // The mmaps should be of the first block of the file. + final ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fs, TEST_PATH); + mmapManager.visitMmaps(new ClientMmapManager.ClientMmapVisitor() { + @Override + public void accept(ClientMmap mmap) { + Assert.assertEquals(firstBlock, mmap.getBlock()); + } + }); + + // Read more blocks. + results[2] = fsIn.read(null, 4096, + EnumSet.of(ReadOption.SKIP_CHECKSUMS)); + results[3] = fsIn.read(null, 4096, + EnumSet.of(ReadOption.SKIP_CHECKSUMS)); + try { + results[4] = fsIn.read(null, 4096, + EnumSet.of(ReadOption.SKIP_CHECKSUMS)); + Assert.fail("expected UnsupportedOperationException"); + } catch (UnsupportedOperationException e) { + // expected + } + + // we should have 3 mmaps, 0 evictable + mmapManager.visitMmaps(countingVisitor); + Assert.assertEquals(3, countingVisitor.count); + countingVisitor.reset(); + mmapManager.visitEvictable(countingVisitor); + Assert.assertEquals(0, countingVisitor.count); + + // After we close the cursors, the mmaps should be evictable for + // a brief period of time. Then, they should be closed (we're + // using a very quick timeout) + for (ByteBuffer buffer : results) { + if (buffer != null) { + fsIn.releaseBuffer(buffer); + } + } + GenericTestUtils.waitFor(new Supplier() { + public Boolean get() { + countingVisitor.reset(); + try { + mmapManager.visitEvictable(countingVisitor); + } catch (InterruptedException e) { + e.printStackTrace(); + return false; + } + return (0 == countingVisitor.count); + } + }, 10, 10000); + countingVisitor.reset(); + mmapManager.visitMmaps(countingVisitor); + Assert.assertEquals(0, countingVisitor.count); + } finally { + if (fsIn != null) fsIn.close(); + if (fs != null) fs.close(); + if (cluster != null) cluster.shutdown(); + } + } + + /** + * Test HDFS fallback reads. HDFS streams support the ByteBufferReadable + * interface. 
+ */ + @Test + public void testHdfsFallbackReads() throws Exception { + HdfsConfiguration conf = initZeroCopyTest(); + MiniDFSCluster cluster = null; + final Path TEST_PATH = new Path("/a"); + final int TEST_FILE_LENGTH = 16385; + final int RANDOM_SEED = 23453; + FSDataInputStream fsIn = null; + + DistributedFileSystem fs = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + DFSTestUtil.createFile(fs, TEST_PATH, + TEST_FILE_LENGTH, (short)1, RANDOM_SEED); + try { + DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1); + } catch (InterruptedException e) { + Assert.fail("unexpected InterruptedException during " + + "waitReplication: " + e); + } catch (TimeoutException e) { + Assert.fail("unexpected TimeoutException during " + + "waitReplication: " + e); + } + fsIn = fs.open(TEST_PATH); + byte original[] = new byte[TEST_FILE_LENGTH]; + IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH); + fsIn.close(); + fsIn = fs.open(TEST_PATH); + testFallbackImpl(fsIn, original); + } finally { + if (fsIn != null) fsIn.close(); + if (fs != null) fs.close(); + if (cluster != null) cluster.shutdown(); + } + } + + private static class RestrictedAllocatingByteBufferPool + implements ByteBufferPool { + private final boolean direct; + + RestrictedAllocatingByteBufferPool(boolean direct) { + this.direct = direct; + } + @Override + public ByteBuffer getBuffer(boolean direct, int length) { + Preconditions.checkArgument(this.direct == direct); + return direct ? ByteBuffer.allocateDirect(length) : + ByteBuffer.allocate(length); + } + @Override + public void putBuffer(ByteBuffer buffer) { + } + } + + private static void testFallbackImpl(InputStream stream, + byte original[]) throws Exception { + RestrictedAllocatingByteBufferPool bufferPool = + new RestrictedAllocatingByteBufferPool( + stream instanceof ByteBufferReadable); + + ByteBuffer result = ByteBufferUtil.fallbackRead(stream, bufferPool, 10); + Assert.assertEquals(10, result.remaining()); + Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 10), + byteBufferToArray(result)); + + result = ByteBufferUtil.fallbackRead(stream, bufferPool, 5000); + Assert.assertEquals(5000, result.remaining()); + Assert.assertArrayEquals(Arrays.copyOfRange(original, 10, 5010), + byteBufferToArray(result)); + + result = ByteBufferUtil.fallbackRead(stream, bufferPool, 9999999); + Assert.assertEquals(11375, result.remaining()); + Assert.assertArrayEquals(Arrays.copyOfRange(original, 5010, 16385), + byteBufferToArray(result)); + + result = ByteBufferUtil.fallbackRead(stream, bufferPool, 10); + Assert.assertNull(result); + } + + /** + * Test the {@link ByteBufferUtil#fallbackRead} function directly. 
+ */ + @Test + public void testFallbackRead() throws Exception { + HdfsConfiguration conf = initZeroCopyTest(); + MiniDFSCluster cluster = null; + final Path TEST_PATH = new Path("/a"); + final int TEST_FILE_LENGTH = 16385; + final int RANDOM_SEED = 23453; + FSDataInputStream fsIn = null; + + DistributedFileSystem fs = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + DFSTestUtil.createFile(fs, TEST_PATH, + TEST_FILE_LENGTH, (short)1, RANDOM_SEED); + try { + DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1); + } catch (InterruptedException e) { + Assert.fail("unexpected InterruptedException during " + + "waitReplication: " + e); + } catch (TimeoutException e) { + Assert.fail("unexpected TimeoutException during " + + "waitReplication: " + e); + } + fsIn = fs.open(TEST_PATH); + byte original[] = new byte[TEST_FILE_LENGTH]; + IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH); + fsIn.close(); + fsIn = fs.open(TEST_PATH); + testFallbackImpl(fsIn, original); + } finally { + if (fsIn != null) fsIn.close(); + if (fs != null) fs.close(); + if (cluster != null) cluster.shutdown(); + } + } + + /** + * Test fallback reads on a stream which does not support the + * ByteBufferReadable * interface. + */ + @Test + public void testIndirectFallbackReads() throws Exception { + final File TEST_DIR = new File( + System.getProperty("test.build.data","build/test/data")); + final String TEST_PATH = TEST_DIR + File.separator + + "indirectFallbackTestFile"; + final int TEST_FILE_LENGTH = 16385; + final int RANDOM_SEED = 23453; + FileOutputStream fos = null; + FileInputStream fis = null; + try { + fos = new FileOutputStream(TEST_PATH); + Random random = new Random(RANDOM_SEED); + byte original[] = new byte[TEST_FILE_LENGTH]; + random.nextBytes(original); + fos.write(original); + fos.close(); + fos = null; + fis = new FileInputStream(TEST_PATH); + testFallbackImpl(fis, original); + } finally { + IOUtils.cleanup(LOG, fos, fis); + new File(TEST_PATH).delete(); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java index 5015a56a42a..57f5ce979ad 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java @@ -23,24 +23,18 @@ import java.io.IOException; import java.io.RandomAccessFile; import java.nio.ByteBuffer; -import java.util.Arrays; import java.util.concurrent.TimeoutException; -import org.apache.commons.lang.SystemUtils; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.ZeroCopyCursor; -import org.apache.hadoop.hdfs.client.ClientMmap; -import org.apache.hadoop.hdfs.client.ClientMmapManager; import org.apache.hadoop.hdfs.client.HdfsDataInputStream; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.unix.DomainSocket; import org.apache.hadoop.net.unix.TemporarySocketDirectory; -import org.apache.hadoop.io.nativeio.NativeIO; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Assume; @@ -445,322 +439,4 @@ private void 
testStatistics(boolean isShortCircuit) throws Exception { if (sockDir != null) sockDir.close(); } } - - private static byte[] byteBufferToArray(ByteBuffer buf) { - byte resultArray[] = new byte[buf.remaining()]; - buf.get(resultArray); - return resultArray; - } - - public static HdfsConfiguration initZeroCopyTest() { - Assume.assumeTrue(NativeIO.isAvailable()); - Assume.assumeTrue(SystemUtils.IS_OS_UNIX); - HdfsConfiguration conf = new HdfsConfiguration(); - conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true); - sockDir = new TemporarySocketDirectory(); - conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096); - conf.setInt(DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_SIZE, 3); - conf.setLong(DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS, 100); - conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, - new File(sockDir.getDir(), - "TestRequestMmapAccess._PORT.sock").getAbsolutePath()); - conf.setBoolean(DFSConfigKeys. - DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, true); - return conf; - } - - @Test - public void testZeroCopyReads() throws Exception { - HdfsConfiguration conf = initZeroCopyTest(); - MiniDFSCluster cluster = null; - final Path TEST_PATH = new Path("/a"); - FSDataInputStream fsIn = null; - ZeroCopyCursor zcursor = null; - - FileSystem fs = null; - try { - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); - cluster.waitActive(); - fs = cluster.getFileSystem(); - DFSTestUtil.createFile(fs, TEST_PATH, - BlockReaderLocalTest.TEST_LENGTH, (short)1, 7567L); - try { - DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1); - } catch (InterruptedException e) { - Assert.fail("unexpected InterruptedException during " + - "waitReplication: " + e); - } catch (TimeoutException e) { - Assert.fail("unexpected TimeoutException during " + - "waitReplication: " + e); - } - fsIn = fs.open(TEST_PATH); - byte original[] = new byte[BlockReaderLocalTest.TEST_LENGTH]; - IOUtils.readFully(fsIn, original, 0, - BlockReaderLocalTest.TEST_LENGTH); - fsIn.close(); - fsIn = fs.open(TEST_PATH); - zcursor = fsIn.createZeroCopyCursor(); - zcursor.setFallbackBuffer(ByteBuffer. 
- allocateDirect(1024 * 1024 * 4)); - HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn; - zcursor.read(4096); - ByteBuffer result = zcursor.getData(); - Assert.assertEquals(4096, result.remaining()); - Assert.assertEquals(4096, - dfsIn.getReadStatistics().getTotalBytesRead()); - Assert.assertEquals(4096, - dfsIn.getReadStatistics().getTotalZeroCopyBytesRead()); - Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 4096), - byteBufferToArray(result)); - } finally { - if (zcursor != null) zcursor.close(); - if (fsIn != null) fsIn.close(); - if (fs != null) fs.close(); - if (cluster != null) cluster.shutdown(); - } - } - - @Test - public void testShortZeroCopyReads() throws Exception { - HdfsConfiguration conf = initZeroCopyTest(); - MiniDFSCluster cluster = null; - final Path TEST_PATH = new Path("/a"); - FSDataInputStream fsIn = null; - ZeroCopyCursor zcursor = null; - - FileSystem fs = null; - try { - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); - cluster.waitActive(); - fs = cluster.getFileSystem(); - DFSTestUtil.createFile(fs, TEST_PATH, - BlockReaderLocalTest.TEST_LENGTH, (short)1, 7567L); - try { - DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1); - } catch (InterruptedException e) { - Assert.fail("unexpected InterruptedException during " + - "waitReplication: " + e); - } catch (TimeoutException e) { - Assert.fail("unexpected TimeoutException during " + - "waitReplication: " + e); - } - fsIn = fs.open(TEST_PATH); - byte original[] = new byte[BlockReaderLocalTest.TEST_LENGTH]; - IOUtils.readFully(fsIn, original, 0, - BlockReaderLocalTest.TEST_LENGTH); - fsIn.close(); - fsIn = fs.open(TEST_PATH); - zcursor = fsIn.createZeroCopyCursor(); - zcursor.setFallbackBuffer(ByteBuffer. - allocateDirect(1024 * 1024 * 4)); - zcursor.setAllowShortReads(true); - HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn; - zcursor.read(8192); - ByteBuffer result = zcursor.getData(); - Assert.assertEquals(4096, result.remaining()); - Assert.assertEquals(4096, - dfsIn.getReadStatistics().getTotalBytesRead()); - Assert.assertEquals(4096, - dfsIn.getReadStatistics().getTotalZeroCopyBytesRead()); - Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 4096), - byteBufferToArray(result)); - zcursor.read(4097); - result = zcursor.getData(); - Assert.assertEquals(4096, result.remaining()); - Assert.assertArrayEquals(Arrays.copyOfRange(original, 4096, 8192), - byteBufferToArray(result)); - zcursor.setAllowShortReads(false); - zcursor.read(4100); - result = zcursor.getData(); - Assert.assertEquals(4100, result.remaining()); - - Assert.assertArrayEquals(Arrays.copyOfRange(original, 8192, 12292), - byteBufferToArray(result)); - } finally { - if (zcursor != null) zcursor.close(); - if (fsIn != null) fsIn.close(); - if (fs != null) fs.close(); - if (cluster != null) cluster.shutdown(); - } - } - - @Test - public void testZeroCopyReadsNoBackingBuffer() throws Exception { - HdfsConfiguration conf = initZeroCopyTest(); - MiniDFSCluster cluster = null; - final Path TEST_PATH = new Path("/a"); - FSDataInputStream fsIn = null; - ZeroCopyCursor zcursor = null; - - FileSystem fs = null; - try { - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); - cluster.waitActive(); - fs = cluster.getFileSystem(); - DFSTestUtil.createFile(fs, TEST_PATH, - BlockReaderLocalTest.TEST_LENGTH, (short)1, 7567L); - try { - DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1); - } catch (InterruptedException e) { - Assert.fail("unexpected InterruptedException during " + - "waitReplication: " 
+ e); - } catch (TimeoutException e) { - Assert.fail("unexpected TimeoutException during " + - "waitReplication: " + e); - } - fsIn = fs.open(TEST_PATH); - byte original[] = new byte[BlockReaderLocalTest.TEST_LENGTH]; - IOUtils.readFully(fsIn, original, 0, - BlockReaderLocalTest.TEST_LENGTH); - fsIn.close(); - fsIn = fs.open(TEST_PATH); - zcursor = fsIn.createZeroCopyCursor(); - zcursor.setAllowShortReads(false); - HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn; - // This read is longer than the file, and we do not have short reads enabled. - try { - zcursor.read(8192); - Assert.fail("expected UnsupportedOperationException"); - } catch (UnsupportedOperationException e) { - // expected - } - // This read is longer than the block, and we do not have short reads enabled. - try { - zcursor.read(4097); - Assert.fail("expected UnsupportedOperationException"); - } catch (UnsupportedOperationException e) { - // expected - } - // This read should succeed. - zcursor.read(4096); - ByteBuffer result = zcursor.getData(); - Assert.assertEquals(4096, result.remaining()); - Assert.assertEquals(4096, - dfsIn.getReadStatistics().getTotalBytesRead()); - Assert.assertEquals(4096, - dfsIn.getReadStatistics().getTotalZeroCopyBytesRead()); - Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 4096), - byteBufferToArray(result)); - } finally { - if (zcursor != null) zcursor.close(); - if (fsIn != null) fsIn.close(); - if (fs != null) fs.close(); - if (cluster != null) cluster.shutdown(); - } - } - - private static class CountingVisitor - implements ClientMmapManager.ClientMmapVisitor { - int count = 0; - - @Override - public void accept(ClientMmap mmap) { - count++; - } - - public void reset() { - count = 0; - } - } - - @Test - public void testZeroCopyMmapCache() throws Exception { - HdfsConfiguration conf = initZeroCopyTest(); - MiniDFSCluster cluster = null; - final Path TEST_PATH = new Path("/a"); - final int TEST_FILE_LENGTH = 16385; - final int RANDOM_SEED = 23453; - FSDataInputStream fsIn = null; - ZeroCopyCursor zcursor[] = { null, null, null, null, null }; - - DistributedFileSystem fs = null; - try { - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); - cluster.waitActive(); - fs = cluster.getFileSystem(); - DFSTestUtil.createFile(fs, TEST_PATH, - TEST_FILE_LENGTH, (short)1, RANDOM_SEED); - try { - DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1); - } catch (InterruptedException e) { - Assert.fail("unexpected InterruptedException during " + - "waitReplication: " + e); - } catch (TimeoutException e) { - Assert.fail("unexpected TimeoutException during " + - "waitReplication: " + e); - } - fsIn = fs.open(TEST_PATH); - byte original[] = new byte[TEST_FILE_LENGTH]; - IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH); - fsIn.close(); - fsIn = fs.open(TEST_PATH); - for (int i = 0; i < zcursor.length; i++) { - zcursor[i] = fsIn.createZeroCopyCursor(); - zcursor[i].setAllowShortReads(false); - } - ClientMmapManager mmapManager = fs.getClient().getMmapManager(); - CountingVisitor countingVisitor = new CountingVisitor(); - mmapManager.visitMmaps(countingVisitor); - Assert.assertEquals(0, countingVisitor.count); - mmapManager.visitEvictable(countingVisitor); - Assert.assertEquals(0, countingVisitor.count); - zcursor[0].read(4096); - fsIn.seek(0); - zcursor[1].read(4096); - mmapManager.visitMmaps(countingVisitor); - Assert.assertEquals(1, countingVisitor.count); - countingVisitor.reset(); - mmapManager.visitEvictable(countingVisitor); - Assert.assertEquals(0, 
countingVisitor.count); - countingVisitor.reset(); - - // The mmaps should be of the first block of the file. - final ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fs, TEST_PATH); - mmapManager.visitMmaps(new ClientMmapManager.ClientMmapVisitor() { - @Override - public void accept(ClientMmap mmap) { - Assert.assertEquals(firstBlock, mmap.getBlock()); - } - }); - - // Read more blocks. - zcursor[2].read(4096); - zcursor[3].read(4096); - try { - zcursor[4].read(4096); - Assert.fail("expected UnsupportedOperationException"); - } catch (UnsupportedOperationException e) { - // expected - } - - // we should have 3 mmaps, 0 evictable - mmapManager.visitMmaps(countingVisitor); - Assert.assertEquals(3, countingVisitor.count); - countingVisitor.reset(); - mmapManager.visitEvictable(countingVisitor); - Assert.assertEquals(0, countingVisitor.count); - - // After we close the cursors, the mmaps should be evictable for - // a brief period of time. Then, they should be closed (we're - // using a very quick timeout) - for (int i = 0; i < zcursor.length; i++) { - IOUtils.closeStream(zcursor[i]); - } - while (true) { - countingVisitor.reset(); - mmapManager.visitEvictable(countingVisitor); - if (0 == countingVisitor.count) { - break; - } - } - countingVisitor.reset(); - mmapManager.visitMmaps(countingVisitor); - Assert.assertEquals(0, countingVisitor.count); - } finally { - if (fsIn != null) fsIn.close(); - if (fs != null) fs.close(); - if (cluster != null) cluster.shutdown(); - } - - } } From 8f17d645273a30f829bf16f22d8a7c6a9710d627 Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Thu, 26 Sep 2013 20:30:14 +0000 Subject: [PATCH 26/51] HDFS-5266. ElasticByteBufferPool#Key does not implement equals. Contributed by Chris Nauroth. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1526671 13f79535-47bb-0310-9956-ffa450edef68 --- .../main/java/org/apache/hadoop/io/ByteBufferPool.java | 2 +- .../org/apache/hadoop/io/ElasticByteBufferPool.java | 10 ++++++++++ hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt | 2 ++ 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteBufferPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteBufferPool.java index bb2c978e092..aa5f8731c54 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteBufferPool.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteBufferPool.java @@ -31,7 +31,7 @@ public interface ByteBufferPool { * new buffer. * * @param direct Whether the buffer should be direct. - * @param minLength The minimum length the buffer will have. + * @param length The minimum length the buffer will have. * @return A new ByteBuffer. This ByteBuffer must be direct. * Its capacity can be less than what was requested, but * must be at least 1 byte. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java index f1ea9ff6b5b..c4b4491ec3a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java @@ -52,6 +52,16 @@ public int compareTo(Key other) { compare(insertionTime, other.insertionTime). 
result(); } + + @Override + public boolean equals(Object rhs) { + try { + Key o = (Key)rhs; + return (compareTo(o) == 0); + } catch (ClassCastException e) { + return false; + } + } } private final TreeMap buffers = diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index 51af0b7aa3f..cc6737bc97f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -64,3 +64,5 @@ HDFS-4949 (Unreleased) HDFS-5210. Fix some failing unit tests on HDFS-4949 branch. (Contributed by Andrew Wang) + + HDFS-5266. ElasticByteBufferPool#Key does not implement equals. (cnauroth) From 89c147d46f6bc4f8ca78308b76631ac97a79376b Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Fri, 27 Sep 2013 18:32:18 +0000 Subject: [PATCH 27/51] HDFS-5266. Addendum for addressing Findbugs warnings for lack of hashCode method and lack of null check in equals. Contributed by Chris Nauroth. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1527023 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/hadoop/io/ElasticByteBufferPool.java | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java index c4b4491ec3a..694fcbebcf6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java @@ -18,6 +18,7 @@ package org.apache.hadoop.io; import com.google.common.collect.ComparisonChain; +import org.apache.commons.lang.builder.HashCodeBuilder; import java.nio.ByteBuffer; import java.util.Map; @@ -55,6 +56,9 @@ public int compareTo(Key other) { @Override public boolean equals(Object rhs) { + if (rhs == null) { + return false; + } try { Key o = (Key)rhs; return (compareTo(o) == 0); @@ -62,6 +66,14 @@ public boolean equals(Object rhs) { return false; } } + + @Override + public int hashCode() { + return new HashCodeBuilder(). + append(capacity). + append(insertionTime). + toHashCode(); + } } private final TreeMap buffers = From af1ac9a5e8d8d97a855940d853dd59ab4666f6e2 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 4 Oct 2013 17:46:18 +0000 Subject: [PATCH 28/51] HDFS-5119. Persist CacheManager state in the edit log. 
(Contributed by Andrew Wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1529238 13f79535-47bb-0310-9956-ffa450edef68 --- .../main/java/org/apache/hadoop/io/Text.java | 5 +- .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 + .../hadoop/hdfs/protocol/CachePoolInfo.java | 46 +++ .../hadoop/hdfs/protocol/LayoutVersion.java | 3 +- .../hdfs/protocol/PathBasedCacheEntry.java | 2 +- .../hdfs/server/namenode/CacheManager.java | 305 +++++++++++++--- .../hdfs/server/namenode/CachePool.java | 53 ++- .../hdfs/server/namenode/FSEditLog.java | 56 ++- .../hdfs/server/namenode/FSEditLogLoader.java | 58 +++ .../hdfs/server/namenode/FSEditLogOp.java | 344 +++++++++++++++++- .../server/namenode/FSEditLogOpCodes.java | 8 +- .../hdfs/server/namenode/FSImageFormat.java | 12 + .../hdfs/server/namenode/FSNamesystem.java | 30 +- .../namenode/startupprogress/StepType.java | 12 +- .../ImageLoaderCurrent.java | 39 +- .../offlineImageViewer/ImageVisitor.java | 13 +- .../TestCacheReplicationManager.java | 85 ++++- .../namenode/OfflineEditsViewerHelper.java | 19 + .../src/test/resources/editsStored.xml | 274 ++++++++------ 19 files changed, 1153 insertions(+), 214 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java index a5c8b1ecd5c..e4490f1e34e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java @@ -454,10 +454,7 @@ public static ByteBuffer encode(String string, boolean replace) /** Read a UTF8 encoded string from in */ public static String readString(DataInput in) throws IOException { - int length = WritableUtils.readVInt(in); - byte [] bytes = new byte[length]; - in.readFully(bytes, 0, length); - return decode(bytes); + return readString(in, Integer.MAX_VALUE); } /** Read a UTF8 encoded string with a maximum size diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index cc6737bc97f..c9b84d9aa66 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -48,6 +48,9 @@ HDFS-4949 (Unreleased) HDFS-5191. Revisit zero-copy API in FSDataInputStream to make it more intuitive. (Contributed by Colin Patrick McCabe) + HDFS-5119. Persist CacheManager state in the edit log. + (Contributed by Andrew Wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java index c07274b35a2..d6894a7c044 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs.protocol; +import java.io.DataInput; +import java.io.DataOutput; import java.io.IOException; import javax.annotation.Nullable; @@ -27,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.io.Text; /** * Information about a cache pool. 
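The Text.java change above collapses the unbounded readString(DataInput) into the length-limited overload (passing Integer.MAX_VALUE as the limit), so both entry points share one bounded implementation when strings are read back from an image or edit log. As a rough, self-contained sketch of that bounded-read pattern — using a plain int length prefix rather than Hadoop's vint encoding, and a hypothetical class name — it might look like:

import java.io.DataInput;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class BoundedStringReader {
  /**
   * Reads a length-prefixed UTF-8 string, rejecting lengths above maxLength
   * instead of blindly allocating a buffer of whatever size the prefix claims.
   */
  public static String readString(DataInput in, int maxLength)
      throws IOException {
    int length = in.readInt();              // length prefix written by the serializer
    if (length < 0 || length > maxLength) { // guard against corrupt input
      throw new IOException("string length " + length +
          " is outside the allowed range [0, " + maxLength + "]");
    }
    byte[] bytes = new byte[length];
    in.readFully(bytes, 0, length);
    return new String(bytes, StandardCharsets.UTF_8);
  }
}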
@@ -145,4 +148,47 @@ public static void validateName(String poolName) throws IOException { throw new IOException("invalid empty cache pool name"); } } + + public static CachePoolInfo readFrom(DataInput in) throws IOException { + String poolName = Text.readString(in); + CachePoolInfo info = new CachePoolInfo(poolName); + if (in.readBoolean()) { + info.setOwnerName(Text.readString(in)); + } + if (in.readBoolean()) { + info.setGroupName(Text.readString(in)); + } + if (in.readBoolean()) { + info.setMode(FsPermission.read(in)); + } + if (in.readBoolean()) { + info.setWeight(in.readInt()); + } + return info; + } + + public void writeTo(DataOutput out) throws IOException { + Text.writeString(out, poolName); + boolean hasOwner, hasGroup, hasMode, hasWeight; + hasOwner = ownerName != null; + hasGroup = groupName != null; + hasMode = mode != null; + hasWeight = weight != null; + out.writeBoolean(hasOwner); + if (hasOwner) { + Text.writeString(out, ownerName); + } + out.writeBoolean(hasGroup); + if (hasGroup) { + Text.writeString(out, groupName); + } + out.writeBoolean(hasMode); + if (hasMode) { + mode.write(out); + } + out.writeBoolean(hasWeight); + if (hasWeight) { + out.writeInt(weight); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java index d4c62c4c710..09333e5e21e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java @@ -106,7 +106,8 @@ public static enum Feature { SEQUENTIAL_BLOCK_ID(-46, "Allocate block IDs sequentially and store " + "block IDs in the edits log and image files"), EDITLOG_SUPPORT_RETRYCACHE(-47, "Record ClientId and CallId in editlog to " - + "enable rebuilding retry cache in case of HA failover"); + + "enable rebuilding retry cache in case of HA failover"), + CACHING(-48, "Support for cache pools and path-based caching"); final int lv; final int ancestorLV; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java index 292c3f563c6..b4bd1545e3c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java @@ -65,6 +65,6 @@ public String toString() { } public PathBasedCacheDescriptor getDescriptor() { - return new PathBasedCacheDescriptor(entryId, path, pool.getName()); + return new PathBasedCacheDescriptor(entryId, path, pool.getPoolName()); } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index ad24227aa06..9e1000934c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -17,11 +17,13 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES; -import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT; +import java.io.DataInput; +import java.io.DataOutput; import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; @@ -36,17 +38,24 @@ import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.protocol.CachePoolInfo; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError; -import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.UnexpectedAddPathBasedCacheDirectiveException; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PoolWritePermissionDeniedError; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.InvalidIdException; import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.NoSuchIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.UnexpectedRemovePathBasedCacheDescriptorException; import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.RemovePermissionDeniedException; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.UnexpectedRemovePathBasedCacheDescriptorException; +import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase; +import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; +import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; +import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step; +import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType; +import org.apache.hadoop.io.Text; + +import com.google.common.base.Preconditions; /** * The Cache Manager handles caching on DataNodes. @@ -94,7 +103,6 @@ public final class CacheManager { final private FSDirectory dir; CacheManager(FSNamesystem namesystem, FSDirectory dir, Configuration conf) { - // TODO: support loading and storing of the CacheManager state clear(); this.namesystem = namesystem; this.dir = dir; @@ -113,13 +121,20 @@ synchronized void clear() { nextEntryId = 1; } - synchronized long getNextEntryId() throws IOException { - if (nextEntryId == Long.MAX_VALUE) { - throw new IOException("no more available IDs"); - } + /** + * Returns the next entry ID to be used for a PathBasedCacheEntry + */ + synchronized long getNextEntryId() { + Preconditions.checkArgument(nextEntryId != Long.MAX_VALUE); return nextEntryId++; } + /** + * Returns the PathBasedCacheEntry corresponding to a PathBasedCacheEntry. 
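The reworked getNextEntryId above now treats exhaustion of the 64-bit ID space as a programming error (a precondition failure) rather than a recoverable IOException. A stand-alone sketch of that allocator, with a hypothetical class name:

// Monotonically increasing entry-id allocator, mirroring the patched
// getNextEntryId: running out of ids indicates a bug, not an operational
// condition, so it fails fast instead of throwing a checked exception.
class EntryIdAllocator {
  private long nextId = 1;

  synchronized long next() {
    if (nextId == Long.MAX_VALUE) {
      throw new IllegalStateException("no more available IDs");
    }
    return nextId++;
  }
}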
+ * + * @param directive Lookup directive + * @return Corresponding PathBasedCacheEntry, or null if not present. + */ private synchronized PathBasedCacheEntry findEntry(PathBasedCacheDirective directive) { List existing = @@ -128,13 +143,60 @@ synchronized long getNextEntryId() throws IOException { return null; } for (PathBasedCacheEntry entry : existing) { - if (entry.getPool().getName().equals(directive.getPool())) { + if (entry.getPool().getPoolName().equals(directive.getPool())) { return entry; } } return null; } + /** + * Add a new PathBasedCacheEntry, skipping any validation checks. Called + * directly when reloading CacheManager state from FSImage. + * + * @throws IOException if unable to cache the entry + */ + private void unprotectedAddEntry(PathBasedCacheEntry entry) + throws IOException { + assert namesystem.hasWriteLock(); + // Add it to the various maps + entriesById.put(entry.getEntryId(), entry); + String path = entry.getPath(); + List entryList = entriesByPath.get(path); + if (entryList == null) { + entryList = new ArrayList(1); + entriesByPath.put(path, entryList); + } + entryList.add(entry); + // Set the path as cached in the namesystem + try { + INode node = dir.getINode(entry.getPath()); + if (node != null && node.isFile()) { + INodeFile file = node.asFile(); + // TODO: adjustable cache replication factor + namesystem.setCacheReplicationInt(entry.getPath(), + file.getBlockReplication()); + } else { + LOG.warn("Path " + entry.getPath() + " is not a file"); + } + } catch (IOException ioe) { + LOG.info("unprotectedAddEntry " + entry +": failed to cache file: " + + ioe.getClass().getName() +": " + ioe.getMessage()); + throw ioe; + } + } + + /** + * Add a new PathBasedCacheDirective if valid, returning a corresponding + * PathBasedCacheDescriptor to the user. + * + * @param directive Directive describing the cache entry being added + * @param pc Permission checker used to validate that the calling user has + * access to the destination cache pool + * @return Corresponding PathBasedCacheDescriptor for the new cache entry + * @throws IOException if the directive is invalid or was otherwise + * unsuccessful + */ public synchronized PathBasedCacheDescriptor addDirective( PathBasedCacheDirective directive, FSPermissionChecker pc) throws IOException { @@ -162,47 +224,44 @@ public synchronized PathBasedCacheDescriptor addDirective( "existing directive " + existing + " in this pool."); return existing.getDescriptor(); } - // Add a new entry with the next available ID. - PathBasedCacheEntry entry; - try { - entry = new PathBasedCacheEntry(getNextEntryId(), - directive.getPath(), pool); - } catch (IOException ioe) { - throw new UnexpectedAddPathBasedCacheDirectiveException(directive); - } - LOG.info("addDirective " + directive + ": added cache directive " - + directive); // Success! - // First, add it to the various maps - entriesById.put(entry.getEntryId(), entry); - String path = directive.getPath(); - List entryList = entriesByPath.get(path); - if (entryList == null) { - entryList = new ArrayList(1); - entriesByPath.put(path, entryList); - } - entryList.add(entry); + PathBasedCacheDescriptor d = unprotectedAddDirective(directive); + LOG.info("addDirective " + directive + ": added cache directive " + + directive); + return d; + } + + /** + * Assigns a new entry ID to a validated PathBasedCacheDirective and adds + * it to the CacheManager. Called directly when replaying the edit log. 
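unprotectedAddEntry above has to keep two views of the same entries consistent: the id-keyed map used for descriptor removal and the path-keyed multimap used for lookups by file. A minimal stand-alone sketch of that dual-index bookkeeping, with hypothetical types standing in for PathBasedCacheEntry and the two maps:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative only: a stand-in for the id- and path-keyed indexes that
// unprotectedAddEntry and unprotectedRemoveDescriptor keep in sync.
class CacheEntryIndex {
  static final class Entry {
    final long id;
    final String path;
    Entry(long id, String path) { this.id = id; this.path = path; }
  }

  private final Map<Long, Entry> byId = new HashMap<>();
  private final Map<String, List<Entry>> byPath = new HashMap<>();

  /** Adds the entry to both indexes. */
  void add(Entry e) {
    byId.put(e.id, e);
    byPath.computeIfAbsent(e.path, p -> new ArrayList<>()).add(e);
  }

  /** Removes the entry from both indexes, dropping empty path buckets. */
  void remove(long id) {
    Entry e = byId.remove(id);
    if (e == null) {
      return;
    }
    List<Entry> bucket = byPath.get(e.path);
    if (bucket != null) {
      bucket.remove(e);
      if (bucket.isEmpty()) {
        byPath.remove(e.path);
      }
    }
  }
}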
+ * + * @param directive Directive being added + * @return PathBasedCacheDescriptor for the directive + * @throws IOException + */ + PathBasedCacheDescriptor unprotectedAddDirective( + PathBasedCacheDirective directive) throws IOException { + assert namesystem.hasWriteLock(); + CachePool pool = cachePools.get(directive.getPool()); + // Add a new entry with the next available ID. + PathBasedCacheEntry entry; + entry = new PathBasedCacheEntry(getNextEntryId(), directive.getPath(), + pool); + + unprotectedAddEntry(entry); - // Next, set the path as cached in the namesystem - try { - INode node = dir.getINode(directive.getPath()); - if (node != null && node.isFile()) { - INodeFile file = node.asFile(); - // TODO: adjustable cache replication factor - namesystem.setCacheReplicationInt(directive.getPath(), - file.getBlockReplication()); - } else { - LOG.warn("Path " + directive.getPath() + " is not a file"); - } - } catch (IOException ioe) { - LOG.info("addDirective " + directive +": failed to cache file: " + - ioe.getClass().getName() +": " + ioe.getMessage()); - throw ioe; - } return entry.getDescriptor(); } + /** + * Remove the PathBasedCacheEntry corresponding to a descriptor ID from + * the CacheManager. + * + * @param id of the PathBasedCacheDescriptor + * @param pc Permissions checker used to validated the request + * @throws IOException + */ public synchronized void removeDescriptor(long id, FSPermissionChecker pc) throws IOException { // Check for invalid IDs. @@ -229,6 +288,20 @@ public synchronized void removeDescriptor(long id, FSPermissionChecker pc) throw new RemovePermissionDeniedException(id); } + unprotectedRemoveDescriptor(id); + } + + /** + * Unchecked internal method used to remove a PathBasedCacheEntry from the + * CacheManager. Called directly when replaying the edit log. + * + * @param id of the PathBasedCacheDescriptor corresponding to the entry that + * is being removed + * @throws IOException + */ + void unprotectedRemoveDescriptor(long id) throws IOException { + assert namesystem.hasWriteLock(); + PathBasedCacheEntry existing = entriesById.get(id); // Remove the corresponding entry in entriesByPath. String path = existing.getDescriptor().getPath(); List entries = entriesByPath.get(path); @@ -294,11 +367,11 @@ public synchronized void removeDescriptor(long id, FSPermissionChecker pc) * Create a cache pool. * * Only the superuser should be able to call this function. - * - * @param info - * The info for the cache pool to create. + * + * @param info The info for the cache pool to create. + * @return the created CachePool */ - public synchronized void addCachePool(CachePoolInfo info) + public synchronized CachePool addCachePool(CachePoolInfo info) throws IOException { CachePoolInfo.validate(info); String poolName = info.getPoolName(); @@ -309,8 +382,20 @@ public synchronized void addCachePool(CachePoolInfo info) CachePool cachePool = new CachePool(poolName, info.getOwnerName(), info.getGroupName(), info.getMode(), info.getWeight()); - cachePools.put(poolName, cachePool); - LOG.info("created new cache pool " + cachePool); + unprotectedAddCachePool(cachePool); + return cachePool; + } + + /** + * Internal unchecked method used to add a CachePool. Called directly when + * reloading CacheManager state from the FSImage or edit log. 
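The split introduced here between addCachePool/removeDescriptor and their unprotected* counterparts is the core of the change: the public methods validate the request and journal an edit, while the unprotected methods only mutate in-memory state, so edit-log replay and FSImage loading can call them directly without re-validating or re-logging. A condensed sketch of that split, with hypothetical names and a trivial stand-in for the edit log:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

// Illustrative sketch of the checked/unprotected pattern; not the actual
// CacheManager code.
class PoolRegistry {
  interface EditLog { void logAddPool(String name); }

  private final Map<String, Integer> pools = new HashMap<>();
  private final EditLog editLog;

  PoolRegistry(EditLog editLog) { this.editLog = editLog; }

  /** Client-facing path: validate, apply, then journal the edit. */
  synchronized void addPool(String name, int weight) throws IOException {
    if (name == null || name.isEmpty()) {
      throw new IOException("invalid empty pool name");
    }
    if (pools.containsKey(name)) {
      throw new IOException("pool " + name + " already exists");
    }
    unprotectedAddPool(name, weight);  // mutate in-memory state
    editLog.logAddPool(name);          // record the edit for durability/replay
  }

  /** Replay path: apply state only; the edit was validated when first logged. */
  synchronized void unprotectedAddPool(String name, int weight) {
    pools.put(name, weight);
  }
}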
+ * + * @param pool to be added + */ + void unprotectedAddCachePool(CachePool pool) { + assert namesystem.hasWriteLock(); + cachePools.put(pool.getPoolName(), pool); + LOG.info("created new cache pool " + pool); } /** @@ -409,4 +494,116 @@ public synchronized void removeCachePool(String poolName) } return new BatchedListEntries(results, false); } + + /* + * FSImage related serialization and deserialization code + */ + + /** + * Saves the current state of the CacheManager to the DataOutput. Used + * to persist CacheManager state in the FSImage. + * @param out DataOutput to persist state + * @param sdPath path of the storage directory + * @throws IOException + */ + public synchronized void saveState(DataOutput out, String sdPath) + throws IOException { + out.writeLong(nextEntryId); + savePools(out, sdPath); + saveEntries(out, sdPath); + } + + /** + * Reloads CacheManager state from the passed DataInput. Used during namenode + * startup to restore CacheManager state from an FSImage. + * @param in DataInput from which to restore state + * @throws IOException + */ + public synchronized void loadState(DataInput in) throws IOException { + nextEntryId = in.readLong(); + // pools need to be loaded first since entries point to their parent pool + loadPools(in); + loadEntries(in); + } + + /** + * Save cache pools to fsimage + */ + private synchronized void savePools(DataOutput out, + String sdPath) throws IOException { + StartupProgress prog = NameNode.getStartupProgress(); + Step step = new Step(StepType.CACHE_POOLS, sdPath); + prog.beginStep(Phase.SAVING_CHECKPOINT, step); + prog.setTotal(Phase.SAVING_CHECKPOINT, step, cachePools.size()); + Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step); + out.writeInt(cachePools.size()); + for (CachePool pool: cachePools.values()) { + pool.writeTo(out); + counter.increment(); + } + prog.endStep(Phase.SAVING_CHECKPOINT, step); + } + + /* + * Save cache entries to fsimage + */ + private synchronized void saveEntries(DataOutput out, String sdPath) + throws IOException { + StartupProgress prog = NameNode.getStartupProgress(); + Step step = new Step(StepType.CACHE_ENTRIES, sdPath); + prog.beginStep(Phase.SAVING_CHECKPOINT, step); + prog.setTotal(Phase.SAVING_CHECKPOINT, step, entriesById.size()); + Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step); + out.writeInt(entriesById.size()); + for (PathBasedCacheEntry entry: entriesById.values()) { + out.writeLong(entry.getEntryId()); + Text.writeString(out, entry.getPath()); + Text.writeString(out, entry.getPool().getPoolName()); + counter.increment(); + } + prog.endStep(Phase.SAVING_CHECKPOINT, step); + } + + /** + * Load cache pools from fsimage + */ + private synchronized void loadPools(DataInput in) + throws IOException { + StartupProgress prog = NameNode.getStartupProgress(); + Step step = new Step(StepType.CACHE_POOLS); + prog.beginStep(Phase.LOADING_FSIMAGE, step); + int numberOfPools = in.readInt(); + prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfPools); + Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step); + for (int i = 0; i < numberOfPools; i++) { + CachePool pool = CachePool.readFrom(in); + unprotectedAddCachePool(pool); + counter.increment(); + } + prog.endStep(Phase.LOADING_FSIMAGE, step); + } + + /** + * Load cache entries from the fsimage + */ + private synchronized void loadEntries(DataInput in) throws IOException { + StartupProgress prog = NameNode.getStartupProgress(); + Step step = new Step(StepType.CACHE_ENTRIES); + prog.beginStep(Phase.LOADING_FSIMAGE, 
step); + int numberOfEntries = in.readInt(); + prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfEntries); + Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step); + for (int i = 0; i < numberOfEntries; i++) { + long entryId = in.readLong(); + String path = Text.readString(in); + String poolName = Text.readString(in); + // Get pool reference by looking it up in the map + CachePool pool = cachePools.get(poolName); + PathBasedCacheEntry entry = new PathBasedCacheEntry(entryId, path, pool); + unprotectedAddEntry(entry); + counter.increment(); + } + prog.endStep(Phase.LOADING_FSIMAGE, step); + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java index b553154c7d9..ff580f032df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import java.io.DataInput; +import java.io.DataOutput; import java.io.IOException; import javax.annotation.Nonnull; @@ -26,8 +28,15 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; +import org.apache.hadoop.hdfs.util.XMLUtils; +import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException; +import org.apache.hadoop.hdfs.util.XMLUtils.Stanza; +import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; +import org.xml.sax.ContentHandler; +import org.xml.sax.SAXException; /** * A CachePool describes a set of cache resources being managed by the NameNode. @@ -63,7 +72,7 @@ public final class CachePool { private FsPermission mode; private int weight; - + public CachePool(String poolName, String ownerName, String groupName, FsPermission mode, Integer weight) throws IOException { this.poolName = poolName; @@ -86,10 +95,10 @@ public CachePool(String poolName, String ownerName, String groupName, } this.mode = mode != null ? new FsPermission(mode): FsPermission.getCachePoolDefault(); - this.weight = weight != null ? weight : 100; + this.weight = weight != null ? weight : DEFAULT_WEIGHT; } - public String getName() { + public String getPoolName() { return poolName; } @@ -162,4 +171,42 @@ public String toString() { append(", weight:").append(weight). 
append(" }").toString(); } + + public void writeTo(DataOutput out) throws IOException { + Text.writeString(out, poolName); + PermissionStatus perm = PermissionStatus.createImmutable( + ownerName, groupName, mode); + perm.write(out); + out.writeInt(weight); + } + + public static CachePool readFrom(DataInput in) throws IOException { + String poolName = Text.readString(in); + PermissionStatus perm = PermissionStatus.read(in); + int weight = in.readInt(); + return new CachePool(poolName, perm.getUserName(), perm.getGroupName(), + perm.getPermission(), weight); + } + + public void writeXmlTo(ContentHandler contentHandler) throws SAXException { + XMLUtils.addSaxString(contentHandler, "POOLNAME", poolName); + PermissionStatus perm = new PermissionStatus(ownerName, + groupName, mode); + FSEditLogOp.permissionStatusToXml(contentHandler, perm); + XMLUtils.addSaxString(contentHandler, "WEIGHT", Integer.toString(weight)); + } + + public static CachePool readXmlFrom(Stanza st) throws InvalidXmlException { + String poolName = st.getValue("POOLNAME"); + PermissionStatus perm = FSEditLogOp.permissionStatusFromXml(st); + int weight = Integer.parseInt(st.getValue("WEIGHT")); + try { + return new CachePool(poolName, perm.getUserName(), perm.getGroupName(), + perm.getPermission(), weight); + } catch (IOException e) { + String error = "Invalid cache pool XML, missing fields."; + LOG.warn(error); + throw new InvalidXmlException(error); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index 60ffe7ac172..3289799fb5b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.util.ExitUtil.terminate; import static org.apache.hadoop.util.Time.now; import java.io.IOException; @@ -35,15 +36,18 @@ import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; - -import static org.apache.hadoop.util.ExitUtil.terminate; - import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCachePoolOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddPathBasedCacheDirectiveOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CloseOp; @@ -55,12 +59,17 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.GetDelegationTokenOp; import 
org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.LogSegmentOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCachePoolOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemovePathBasedCacheDescriptorOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp; @@ -69,9 +78,6 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp; -import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp; -import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op; -import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op; import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream; import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; @@ -948,6 +954,44 @@ void logDisallowSnapshot(String path) { logEdit(op); } + void logAddPathBasedCacheDirective(PathBasedCacheDirective directive, + boolean toLogRpcIds) { + AddPathBasedCacheDirectiveOp op = AddPathBasedCacheDirectiveOp.getInstance( + cache.get()) + .setPath(directive.getPath()) + .setPool(directive.getPool()); + logRpcIds(op, toLogRpcIds); + logEdit(op); + } + + void logRemovePathBasedCacheDescriptor(Long id, boolean toLogRpcIds) { + RemovePathBasedCacheDescriptorOp op = + RemovePathBasedCacheDescriptorOp.getInstance(cache.get()).setId(id); + logRpcIds(op, toLogRpcIds); + logEdit(op); + } + + void logAddCachePool(CachePool pool, boolean toLogRpcIds) { + AddCachePoolOp op = + AddCachePoolOp.getInstance(cache.get()).setPool(pool); + logRpcIds(op, toLogRpcIds); + logEdit(op); + } + + void logModifyCachePool(CachePoolInfo info, boolean toLogRpcIds) { + ModifyCachePoolOp op = + ModifyCachePoolOp.getInstance(cache.get()).setInfo(info); + logRpcIds(op, toLogRpcIds); + logEdit(op); + } + + void logRemoveCachePool(String poolName, boolean toLogRpcIds) { + RemoveCachePoolOp op = + RemoveCachePoolOp.getInstance(cache.get()).setPoolName(poolName); + logRpcIds(op, toLogRpcIds); + logEdit(op); + } + /** * Get all the journals this edit log is currently operating on. 
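The CachePool.writeTo/readFrom and CachePoolInfo.writeTo/readFrom pairs above, and the readFields/writeFields pairs in the new FSEditLogOp subclasses below, all rely on the read order exactly matching the write order. A self-contained round-trip check of that pattern, using a hypothetical two-field record rather than any of the real types:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Hypothetical record used only to illustrate the write/read symmetry the new
// serializers depend on.
public class RoundTripCheck {
  static void write(DataOutputStream out, String pool, int weight)
      throws IOException {
    out.writeUTF(pool);    // field 1: pool name
    out.writeInt(weight);  // field 2: weight -- must be read back in this order
  }

  static String read(DataInputStream in) throws IOException {
    String pool = in.readUTF();
    int weight = in.readInt();
    return pool + ":" + weight;
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    try (DataOutputStream out = new DataOutputStream(buf)) {
      write(out, "research", 100);
    }
    try (DataInputStream in =
             new DataInputStream(new ByteArrayInputStream(buf.toByteArray()))) {
      System.out.println(read(in));  // prints research:100
    }
  }
}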
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 03a1dbc1fbd..3233c1eb419 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -36,10 +36,14 @@ import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.common.Storage; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCachePoolOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddPathBasedCacheDirectiveOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.BlockListUpdatingOp; @@ -52,7 +56,10 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DisallowSnapshotOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.GetDelegationTokenOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCachePoolOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemovePathBasedCacheDescriptorOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp; @@ -76,6 +83,7 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step; import org.apache.hadoop.hdfs.util.ChunkedArrayList; import org.apache.hadoop.hdfs.util.Holder; +import org.apache.jasper.tagplugins.jstl.core.Remove; import com.google.common.base.Joiner; @@ -631,6 +639,56 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, fsNamesys.setLastAllocatedBlockId(allocateBlockIdOp.blockId); break; } + case OP_ADD_PATH_BASED_CACHE_DIRECTIVE: { + AddPathBasedCacheDirectiveOp addOp = (AddPathBasedCacheDirectiveOp) op; + PathBasedCacheDirective d = new PathBasedCacheDirective(addOp.path, + addOp.pool); + PathBasedCacheDescriptor descriptor = + fsNamesys.getCacheManager().unprotectedAddDirective(d); + + if (toAddRetryCache) { + fsNamesys.addCacheEntryWithPayload(op.rpcClientId, op.rpcCallId, + descriptor); + } + break; + } + case OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR: { + RemovePathBasedCacheDescriptorOp removeOp = + (RemovePathBasedCacheDescriptorOp) op; + fsNamesys.getCacheManager().unprotectedRemoveDescriptor(removeOp.id); + + if (toAddRetryCache) { + fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId); + } + break; + } + case OP_ADD_CACHE_POOL: { + AddCachePoolOp addOp = (AddCachePoolOp) op; + 
fsNamesys.getCacheManager().unprotectedAddCachePool(addOp.pool); + + if (toAddRetryCache) { + fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId); + } + break; + } + case OP_MODIFY_CACHE_POOL: { + ModifyCachePoolOp modifyOp = (ModifyCachePoolOp) op; + fsNamesys.getCacheManager().modifyCachePool(modifyOp.info); + + if (toAddRetryCache) { + fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId); + } + break; + } + case OP_REMOVE_CACHE_POOL: { + RemoveCachePoolOp removeOp = (RemoveCachePoolOp) op; + fsNamesys.getCacheManager().removeCachePool(removeOp.poolName); + + if (toAddRetryCache) { + fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId); + } + break; + } default: throw new IOException("Invalid operation read " + op.opCode); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index 10432bfd8e1..da5a04a2094 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD; +import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_CACHE_POOL; +import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_PATH_BASED_CACHE_DIRECTIVE; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ALLOCATE_BLOCK_ID; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ALLOW_SNAPSHOT; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_CANCEL_DELEGATION_TOKEN; @@ -32,7 +34,10 @@ import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_GET_DELEGATION_TOKEN; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_INVALID; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_MKDIR; +import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_MODIFY_CACHE_POOL; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REASSIGN_LEASE; +import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_CACHE_POOL; +import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME_OLD; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME_SNAPSHOT; @@ -56,6 +61,7 @@ import java.io.DataOutputStream; import java.io.EOFException; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.EnumMap; import java.util.List; @@ -73,6 +79,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DeprecatedUTF8; import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LayoutVersion; @@ -97,7 +104,9 @@ import org.xml.sax.SAXException; import org.xml.sax.helpers.AttributesImpl; +import com.google.common.base.Joiner; import com.google.common.base.Preconditions; +import com.google.common.base.Strings; 
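Each replay case above re-applies the operation through an unprotected* method and, when toAddRetryCache is set, records the call so that a client retrying the same RPC after an HA failover is answered from the cache instead of re-executing a non-idempotent operation. A minimal stand-alone sketch of that idea — not Hadoop's RetryCache API, just the shape of it, keyed by (clientId, callId):

import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

// Hypothetical retry cache: remembers the result of a completed call so a
// retried RPC with the same client/call id gets the original answer.
class SimpleRetryCache<R> {
  private static final class Key {
    final String clientId;
    final int callId;
    Key(String clientId, int callId) { this.clientId = clientId; this.callId = callId; }
    @Override public boolean equals(Object o) {
      if (!(o instanceof Key)) return false;
      Key k = (Key) o;
      return callId == k.callId && clientId.equals(k.clientId);
    }
    @Override public int hashCode() { return Objects.hash(clientId, callId); }
  }

  private final Map<Key, R> completed = new HashMap<>();

  /** Returns the cached result of an earlier attempt, or null if none. */
  synchronized R lookup(String clientId, int callId) {
    return completed.get(new Key(clientId, callId));
  }

  /** Records the result so a retry of the same call is answered from cache. */
  synchronized void record(String clientId, int callId, R result) {
    completed.put(new Key(clientId, callId), result);
  }
}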
/** * Helper classes for reading the ops from an InputStream. @@ -153,6 +162,13 @@ public OpInstanceCache() { inst.put(OP_RENAME_SNAPSHOT, new RenameSnapshotOp()); inst.put(OP_SET_GENSTAMP_V2, new SetGenstampV2Op()); inst.put(OP_ALLOCATE_BLOCK_ID, new AllocateBlockIdOp()); + inst.put(OP_ADD_PATH_BASED_CACHE_DIRECTIVE, + new AddPathBasedCacheDirectiveOp()); + inst.put(OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR, + new RemovePathBasedCacheDescriptorOp()); + inst.put(OP_ADD_CACHE_POOL, new AddCachePoolOp()); + inst.put(OP_MODIFY_CACHE_POOL, new ModifyCachePoolOp()); + inst.put(OP_REMOVE_CACHE_POOL, new RemoveCachePoolOp()); } public FSEditLogOp get(FSEditLogOpCodes opcode) { @@ -528,8 +544,7 @@ void fromXml(Stanza st) throws InvalidXmlException { } else { this.blocks = new Block[0]; } - this.permissions = - permissionStatusFromXml(st.getChildren("PERMISSION_STATUS").get(0)); + this.permissions = permissionStatusFromXml(st); readRpcIdsFromXml(st); } } @@ -1208,8 +1223,7 @@ protected void toXml(ContentHandler contentHandler) throws SAXException { this.inodeId = Long.valueOf(st.getValue("INODEID")); this.path = st.getValue("PATH"); this.timestamp = Long.valueOf(st.getValue("TIMESTAMP")); - this.permissions = - permissionStatusFromXml(st.getChildren("PERMISSION_STATUS").get(0)); + this.permissions = permissionStatusFromXml(st); } } @@ -1940,8 +1954,7 @@ void fromXml(Stanza st) throws InvalidXmlException { this.value = st.getValue("VALUE"); this.mtime = Long.valueOf(st.getValue("MTIME")); this.atime = Long.valueOf(st.getValue("ATIME")); - this.permissionStatus = - permissionStatusFromXml(st.getChildren("PERMISSION_STATUS").get(0)); + this.permissionStatus = permissionStatusFromXml(st); readRpcIdsFromXml(st); } @@ -2848,6 +2861,266 @@ public String toString() { } } + static class AddPathBasedCacheDirectiveOp extends FSEditLogOp { + + String path; + String pool; + + public AddPathBasedCacheDirectiveOp() { + super(OP_ADD_PATH_BASED_CACHE_DIRECTIVE); + } + + static AddPathBasedCacheDirectiveOp getInstance(OpInstanceCache cache) { + return (AddPathBasedCacheDirectiveOp) cache + .get(OP_ADD_PATH_BASED_CACHE_DIRECTIVE); + } + + public AddPathBasedCacheDirectiveOp setPath(String path) { + this.path = path; + return this; + } + + public AddPathBasedCacheDirectiveOp setPool(String pool) { + this.pool = pool; + return this; + } + + @Override + void readFields(DataInputStream in, int logVersion) throws IOException { + this.path = FSImageSerialization.readString(in); + this.pool = FSImageSerialization.readString(in); + } + + @Override + public void writeFields(DataOutputStream out) throws IOException { + FSImageSerialization.writeString(path, out); + FSImageSerialization.writeString(pool, out); + } + + @Override + protected void toXml(ContentHandler contentHandler) throws SAXException { + XMLUtils.addSaxString(contentHandler, "PATH", path); + XMLUtils.addSaxString(contentHandler, "POOL", pool); + } + + @Override + void fromXml(Stanza st) throws InvalidXmlException { + path = st.getValue("PATH"); + pool = st.getValue("POOL"); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("AddPathBasedCacheDirective ["); + builder.append("path=" + path + ","); + builder.append("pool=" + pool + "]"); + return builder.toString(); + } + } + + static class RemovePathBasedCacheDescriptorOp extends FSEditLogOp { + long id; + + public RemovePathBasedCacheDescriptorOp() { + super(OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR); + } + + static RemovePathBasedCacheDescriptorOp 
getInstance(OpInstanceCache cache) { + return (RemovePathBasedCacheDescriptorOp) cache + .get(OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR); + } + + public RemovePathBasedCacheDescriptorOp setId(long id) { + this.id = id; + return this; + } + + @Override + void readFields(DataInputStream in, int logVersion) throws IOException { + this.id = FSImageSerialization.readLong(in); + } + + @Override + public void writeFields(DataOutputStream out) throws IOException { + FSImageSerialization.writeLong(id, out); + } + + @Override + protected void toXml(ContentHandler contentHandler) throws SAXException { + XMLUtils.addSaxString(contentHandler, "ID", Long.toString(id)); + } + + @Override + void fromXml(Stanza st) throws InvalidXmlException { + this.id = Long.parseLong(st.getValue("ID")); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("RemovePathBasedCacheDescriptor ["); + builder.append("id=" + Long.toString(id) + "]"); + return builder.toString(); + } + } + + static class AddCachePoolOp extends FSEditLogOp { + CachePool pool; + + public AddCachePoolOp() { + super(OP_ADD_CACHE_POOL); + } + + static AddCachePoolOp getInstance(OpInstanceCache cache) { + return (AddCachePoolOp) cache.get(OP_ADD_CACHE_POOL); + } + + public AddCachePoolOp setPool(CachePool pool) { + this.pool = pool; + return this; + } + + @Override + void readFields(DataInputStream in, int logVersion) throws IOException { + pool = CachePool.readFrom(in); + } + + @Override + public void writeFields(DataOutputStream out) throws IOException { + pool.writeTo(out); + } + + @Override + protected void toXml(ContentHandler contentHandler) throws SAXException { + pool.writeXmlTo(contentHandler); + } + + @Override + void fromXml(Stanza st) throws InvalidXmlException { + this.pool = CachePool.readXmlFrom(st); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("AddCachePoolOp ["); + builder.append("poolName=" + pool.getPoolName() + ","); + builder.append("ownerName=" + pool.getOwnerName() + ","); + builder.append("groupName=" + pool.getGroupName() + ","); + builder.append("mode=" + Short.toString(pool.getMode().toShort()) + ","); + builder.append("weight=" + Integer.toString(pool.getWeight()) + "]"); + return builder.toString(); + } + } + + static class ModifyCachePoolOp extends FSEditLogOp { + CachePoolInfo info; + + public ModifyCachePoolOp() { + super(OP_MODIFY_CACHE_POOL); + } + + static ModifyCachePoolOp getInstance(OpInstanceCache cache) { + return (ModifyCachePoolOp) cache.get(OP_MODIFY_CACHE_POOL); + } + + public ModifyCachePoolOp setInfo(CachePoolInfo info) { + this.info = info; + return this; + } + + @Override + void readFields(DataInputStream in, int logVersion) throws IOException { + info = CachePoolInfo.readFrom(in); + } + + @Override + public void writeFields(DataOutputStream out) throws IOException { + info.writeTo(out); + } + + @Override + protected void toXml(ContentHandler contentHandler) throws SAXException { + cachePoolInfoToXml(contentHandler, info); + } + + @Override + void fromXml(Stanza st) throws InvalidXmlException { + this.info = cachePoolInfoFromXml(st); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("ModifyCachePoolOp ["); + ArrayList fields = new ArrayList(5); + if (info.getPoolName() != null) { + fields.add("poolName=" + info.getPoolName()); + } + if (info.getOwnerName() != null) { + fields.add("ownerName=" + info.getOwnerName()); + } + if 
(info.getGroupName() != null) { + fields.add("groupName=" + info.getGroupName()); + } + if (info.getMode() != null) { + fields.add("mode=" + info.getMode().toString()); + } + if (info.getWeight() != null) { + fields.add("weight=" + info.getWeight()); + } + builder.append(Joiner.on(",").join(fields)); + builder.append("]"); + return builder.toString(); + } + } + + static class RemoveCachePoolOp extends FSEditLogOp { + String poolName; + + public RemoveCachePoolOp() { + super(OP_REMOVE_CACHE_POOL); + } + + static RemoveCachePoolOp getInstance(OpInstanceCache cache) { + return (RemoveCachePoolOp) cache.get(OP_REMOVE_CACHE_POOL); + } + + public RemoveCachePoolOp setPoolName(String poolName) { + this.poolName = poolName; + return this; + } + + @Override + void readFields(DataInputStream in, int logVersion) throws IOException { + poolName = FSImageSerialization.readString(in); + } + + @Override + public void writeFields(DataOutputStream out) throws IOException { + FSImageSerialization.writeString(poolName, out); + } + + @Override + protected void toXml(ContentHandler contentHandler) throws SAXException { + XMLUtils.addSaxString(contentHandler, "POOLNAME", poolName); + } + + @Override + void fromXml(Stanza st) throws InvalidXmlException { + this.poolName = st.getValue("POOLNAME"); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("RemoveCachePoolOp ["); + builder.append("poolName=" + poolName + "]"); + return builder.toString(); + } + } + static private short readShort(DataInputStream in) throws IOException { return Short.parseShort(FSImageSerialization.readString(in)); } @@ -3235,16 +3508,65 @@ public static void permissionStatusToXml(ContentHandler contentHandler, contentHandler.startElement("", "", "PERMISSION_STATUS", new AttributesImpl()); XMLUtils.addSaxString(contentHandler, "USERNAME", perm.getUserName()); XMLUtils.addSaxString(contentHandler, "GROUPNAME", perm.getGroupName()); - XMLUtils.addSaxString(contentHandler, "MODE", - Short.valueOf(perm.getPermission().toShort()).toString()); + fsPermissionToXml(contentHandler, perm.getPermission()); contentHandler.endElement("", "", "PERMISSION_STATUS"); } public static PermissionStatus permissionStatusFromXml(Stanza st) throws InvalidXmlException { - String username = st.getValue("USERNAME"); - String groupname = st.getValue("GROUPNAME"); + Stanza status = st.getChildren("PERMISSION_STATUS").get(0); + String username = status.getValue("USERNAME"); + String groupname = status.getValue("GROUPNAME"); + FsPermission mode = fsPermissionFromXml(status); + return new PermissionStatus(username, groupname, mode); + } + + public static void fsPermissionToXml(ContentHandler contentHandler, + FsPermission mode) throws SAXException { + XMLUtils.addSaxString(contentHandler, "MODE", Short.valueOf(mode.toShort()) + .toString()); + } + + public static FsPermission fsPermissionFromXml(Stanza st) + throws InvalidXmlException { short mode = Short.valueOf(st.getValue("MODE")); - return new PermissionStatus(username, groupname, new FsPermission(mode)); + return new FsPermission(mode); + } + + public static void cachePoolInfoToXml(ContentHandler contentHandler, + CachePoolInfo info) throws SAXException { + XMLUtils.addSaxString(contentHandler, "POOLNAME", info.getPoolName()); + if (info.getOwnerName() != null) { + XMLUtils.addSaxString(contentHandler, "OWNERNAME", info.getOwnerName()); + } + if (info.getGroupName() != null) { + XMLUtils.addSaxString(contentHandler, "GROUPNAME", info.getGroupName()); + } + 
if (info.getMode() != null) { + fsPermissionToXml(contentHandler, info.getMode()); + } + if (info.getWeight() != null) { + XMLUtils.addSaxString(contentHandler, "WEIGHT", + Integer.toString(info.getWeight())); + } + } + + public static CachePoolInfo cachePoolInfoFromXml(Stanza st) + throws InvalidXmlException { + String poolName = st.getValue("POOLNAME"); + CachePoolInfo info = new CachePoolInfo(poolName); + if (st.hasChildren("OWNERNAME")) { + info.setOwnerName(st.getValue("OWNERNAME")); + } + if (st.hasChildren("GROUPNAME")) { + info.setGroupName(st.getValue("GROUPNAME")); + } + if (st.hasChildren("MODE")) { + info.setMode(FSEditLogOp.fsPermissionFromXml(st)); + } + if (st.hasChildren("WEIGHT")) { + info.setWeight(Integer.parseInt(st.getValue("WEIGHT"))); + } + return info; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java index 751eb10d6c9..b9efc1e16a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java @@ -63,7 +63,13 @@ public enum FSEditLogOpCodes { OP_ALLOW_SNAPSHOT ((byte) 29), OP_DISALLOW_SNAPSHOT ((byte) 30), OP_SET_GENSTAMP_V2 ((byte) 31), - OP_ALLOCATE_BLOCK_ID ((byte) 32); + OP_ALLOCATE_BLOCK_ID ((byte) 32), + OP_ADD_PATH_BASED_CACHE_DIRECTIVE ((byte) 33), + OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR ((byte) 34), + OP_ADD_CACHE_POOL ((byte) 35), + OP_MODIFY_CACHE_POOL ((byte) 36), + OP_REMOVE_CACHE_POOL ((byte) 37); + private byte opCode; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index 74f5219c491..34659fbc7cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -351,6 +351,8 @@ void load(File curFile) throws IOException { loadSecretManagerState(in); + loadCacheManagerState(in); + // make sure to read to the end of file boolean eof = (in.read() == -1); assert eof : "Should have reached the end of image file " + curFile; @@ -843,6 +845,14 @@ private void loadSecretManagerState(DataInput in) namesystem.loadSecretManagerState(in); } + private void loadCacheManagerState(DataInput in) throws IOException { + int imgVersion = getLayoutVersion(); + if (!LayoutVersion.supports(Feature.CACHING, imgVersion)) { + return; + } + namesystem.getCacheManager().loadState(in); + } + private int getLayoutVersion() { return namesystem.getFSImage().getStorage().getLayoutVersion(); } @@ -985,6 +995,8 @@ void save(File newFile, FSImageCompression compression) throws IOException { context.checkCancelled(); sourceNamesystem.saveSecretManagerState(out, sdPath); context.checkCancelled(); + sourceNamesystem.getCacheManager().saveState(out, sdPath); + context.checkCancelled(); out.flush(); context.checkCancelled(); fout.getChannel().force(true); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 0299ee7a7b3..6c5040989e5 100644 
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -227,7 +227,6 @@ import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.DataChecksum; @@ -1956,7 +1955,7 @@ boolean setCacheReplicationInt(String src, final short replication) getEditLog().logSync(); if (isFile) { - logAuditEvent(true, "setReplication", src); + logAuditEvent(true, "setCacheReplication", src); } return isFile; } @@ -6884,10 +6883,10 @@ void removeSnapshottableDirs(List toRemove) { PathBasedCacheDescriptor addPathBasedCacheDirective( PathBasedCacheDirective directive) throws IOException { - CacheEntryWithPayload retryCacheEntry = + CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache, null); - if (retryCacheEntry != null && retryCacheEntry.isSuccess()) { - return (PathBasedCacheDescriptor) retryCacheEntry.getPayload(); + if (cacheEntry != null && cacheEntry.isSuccess()) { + return (PathBasedCacheDescriptor) cacheEntry.getPayload(); } final FSPermissionChecker pc = isPermissionEnabled ? getPermissionChecker() : null; @@ -6902,7 +6901,8 @@ PathBasedCacheDescriptor addPathBasedCacheDirective( "Cannot add PathBasedCache directive", safeMode); } result = cacheManager.addDirective(directive, pc); - //getEditLog().logAddPathBasedCacheDirective(result); FIXME: HDFS-5119 + getEditLog().logAddPathBasedCacheDirective(directive, + cacheEntry != null); success = true; } finally { writeUnlock(); @@ -6912,14 +6912,14 @@ PathBasedCacheDescriptor addPathBasedCacheDirective( if (isAuditEnabled() && isExternalInvocation()) { logAuditEvent(success, "addPathBasedCacheDirective", null, null, null); } - RetryCache.setState(retryCacheEntry, success, result); + RetryCache.setState(cacheEntry, success, result); } return result; } void removePathBasedCacheDescriptor(Long id) throws IOException { - CacheEntry retryCacheEntry = RetryCache.waitForCompletion(retryCache); - if (retryCacheEntry != null && retryCacheEntry.isSuccess()) { + CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); + if (cacheEntry != null && cacheEntry.isSuccess()) { return; } final FSPermissionChecker pc = isPermissionEnabled ? 
@@ -6934,7 +6934,7 @@ void removePathBasedCacheDescriptor(Long id) throws IOException { "Cannot remove PathBasedCache directives", safeMode); } cacheManager.removeDescriptor(id, pc); - //getEditLog().logRemovePathBasedCacheEntries(results); FIXME: HDFS-5119 + getEditLog().logRemovePathBasedCacheDescriptor(id, cacheEntry != null); success = true; } finally { writeUnlock(); @@ -6942,7 +6942,7 @@ void removePathBasedCacheDescriptor(Long id) throws IOException { logAuditEvent(success, "removePathBasedCacheDescriptors", null, null, null); } - RetryCache.setState(retryCacheEntry, success); + RetryCache.setState(cacheEntry, success); } getEditLog().logSync(); } @@ -6989,8 +6989,8 @@ public void addCachePool(CachePoolInfo req) throws IOException { if (pc != null) { pc.checkSuperuserPrivilege(); } - cacheManager.addCachePool(req); - //getEditLog().logAddCachePool(req); // FIXME: HDFS-5119 + CachePool pool = cacheManager.addCachePool(req); + getEditLog().logAddCachePool(pool, cacheEntry != null); success = true; } finally { writeUnlock(); @@ -7023,7 +7023,7 @@ public void modifyCachePool(CachePoolInfo req) throws IOException { pc.checkSuperuserPrivilege(); } cacheManager.modifyCachePool(req); - //getEditLog().logModifyCachePool(req); // FIXME: HDFS-5119 + getEditLog().logModifyCachePool(req, cacheEntry != null); success = true; } finally { writeUnlock(); @@ -7056,7 +7056,7 @@ public void removeCachePool(String cachePoolName) throws IOException { pc.checkSuperuserPrivilege(); } cacheManager.removeCachePool(cachePoolName); - //getEditLog().logRemoveCachePool(req); // FIXME: HDFS-5119 + getEditLog().logRemoveCachePool(cachePoolName, cacheEntry != null); success = true; } finally { writeUnlock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StepType.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StepType.java index 2ef9c8e7013..1b43d6a2b09 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StepType.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StepType.java @@ -42,7 +42,17 @@ public enum StepType { /** * The namenode is performing an operation related to inodes. */ - INODES("Inodes", "inodes"); + INODES("Inodes", "inodes"), + + /** + * The namenode is performing an operation related to cache pools. + */ + CACHE_POOLS("CachePools", "cache pools"), + + /** + * The namenode is performing an operation related to cache entries. 
+ */ + CACHE_ENTRIES("CacheEntries", "cache entries"); private final String name, description; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java index d23b27b7d35..411fc16ab09 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java @@ -126,7 +126,7 @@ class ImageLoaderCurrent implements ImageLoader { new SimpleDateFormat("yyyy-MM-dd HH:mm"); private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23, -24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39, - -40, -41, -42, -43, -44, -45, -46, -47 }; + -40, -41, -42, -43, -44, -45, -46, -47, -48 }; private int imageVersion = 0; private final Map subtreeMap = new HashMap(); @@ -216,6 +216,9 @@ public void loadImage(DataInputStream in, ImageVisitor v, processDelegationTokens(in, v); } + if (LayoutVersion.supports(Feature.CACHING, imageVersion)) { + processCacheManagerState(in, v); + } v.leaveEnclosingElement(); // FSImage done = true; } finally { @@ -227,6 +230,24 @@ public void loadImage(DataInputStream in, ImageVisitor v, } } + /** + * Process CacheManager state from the fsimage. + */ + private void processCacheManagerState(DataInputStream in, ImageVisitor v) + throws IOException { + v.visit(ImageElement.CACHE_NEXT_ENTRY_ID, in.readLong()); + final int numPools = in.readInt(); + for (int i=0; i paths = new ArrayList(numFiles); for (int i=0; i pit = dfs.listCachePools(); + assertTrue("No cache pools found", pit.hasNext()); + CachePoolInfo info = pit.next(); + assertEquals(pool, info.getPoolName()); + assertEquals(groupName, info.getGroupName()); + assertEquals(mode, info.getMode()); + assertEquals(weight, (int)info.getWeight()); + assertFalse("Unexpected # of cache pools found", pit.hasNext()); + + // Create some cache entries + int numEntries = 10; + String entryPrefix = "/party-"; + for (int i=0; i dit + = dfs.listPathBasedCacheDescriptors(null, null); + for (int i=0; i - -47 + -48 OP_START_LOG_SEGMENT @@ -13,8 +13,8 @@ 2 1 - 1375509063810 - 4d47710649039b98 + 1381014414770 + 0ed3ccccde5c0830 @@ -24,8 +24,8 @@ 3 2 - 1375509063812 - 38cbb1d8fd90fcb2 + 1381014414779 + 1619312c238cd1b1 @@ -37,18 +37,18 @@ 16386 /file_create_u\0001;F431 1 - 1374817864805 - 1374817864805 + 1380323216882 + 1380323216882 512 - DFSClient_NONMAPREDUCE_-1676409172_1 + DFSClient_NONMAPREDUCE_1160098410_1 127.0.0.1 - jing + andrew supergroup 420 - 5245793a-984b-4264-8d7c-7890775547a0 - 8 + ff07f00d-efa9-4b76-a064-63604cd3286e + 7 @@ -59,13 +59,13 @@ 0 /file_create_u\0001;F431 1 - 1374817864816 - 1374817864805 + 1380323216937 + 1380323216882 512 - jing + andrew supergroup 420 @@ -78,9 +78,9 @@ 0 /file_create_u\0001;F431 /file_moved - 1374817864818 - 5245793a-984b-4264-8d7c-7890775547a0 - 10 + 1380323216955 + ff07f00d-efa9-4b76-a064-63604cd3286e + 9 @@ -89,9 +89,9 @@ 7 0 /file_moved - 1374817864822 - 5245793a-984b-4264-8d7c-7890775547a0 - 11 + 1380323216966 + ff07f00d-efa9-4b76-a064-63604cd3286e + 10 @@ -101,9 +101,9 @@ 0 16387 /directory_mkdir - 1374817864825 + 1380323216981 - jing + andrew supergroup 493 @@ -136,8 +136,8 @@ 12 /directory_mkdir snapshot1 - 5245793a-984b-4264-8d7c-7890775547a0 - 16 + ff07f00d-efa9-4b76-a064-63604cd3286e + 15 @@ 
-147,8 +147,8 @@ /directory_mkdir snapshot1 snapshot2 - 5245793a-984b-4264-8d7c-7890775547a0 - 17 + ff07f00d-efa9-4b76-a064-63604cd3286e + 16 @@ -157,8 +157,8 @@ 14 /directory_mkdir snapshot2 - 5245793a-984b-4264-8d7c-7890775547a0 - 18 + ff07f00d-efa9-4b76-a064-63604cd3286e + 17 @@ -169,18 +169,18 @@ 16388 /file_create_u\0001;F431 1 - 1374817864846 - 1374817864846 + 1380323217070 + 1380323217070 512 - DFSClient_NONMAPREDUCE_-1676409172_1 + DFSClient_NONMAPREDUCE_1160098410_1 127.0.0.1 - jing + andrew supergroup 420 - 5245793a-984b-4264-8d7c-7890775547a0 - 19 + ff07f00d-efa9-4b76-a064-63604cd3286e + 18 @@ -191,13 +191,13 @@ 0 /file_create_u\0001;F431 1 - 1374817864848 - 1374817864846 + 1380323217079 + 1380323217070 512 - jing + andrew supergroup 420 @@ -253,10 +253,10 @@ 0 /file_create_u\0001;F431 /file_moved - 1374817864860 + 1380323217151 NONE - 5245793a-984b-4264-8d7c-7890775547a0 - 26 + ff07f00d-efa9-4b76-a064-63604cd3286e + 25 @@ -267,18 +267,18 @@ 16389 /file_concat_target 1 - 1374817864864 - 1374817864864 + 1380323217170 + 1380323217170 512 - DFSClient_NONMAPREDUCE_-1676409172_1 + DFSClient_NONMAPREDUCE_1160098410_1 127.0.0.1 - jing + andrew supergroup 420 - 5245793a-984b-4264-8d7c-7890775547a0 - 28 + ff07f00d-efa9-4b76-a064-63604cd3286e + 27 @@ -388,8 +388,8 @@ 0 /file_concat_target 1 - 1374817864927 - 1374817864864 + 1380323217424 + 1380323217170 512 @@ -409,7 +409,7 @@ 1003 - jing + andrew supergroup 420 @@ -423,18 +423,18 @@ 16390 /file_concat_0 1 - 1374817864929 - 1374817864929 + 1380323217436 + 1380323217436 512 - DFSClient_NONMAPREDUCE_-1676409172_1 + DFSClient_NONMAPREDUCE_1160098410_1 127.0.0.1 - jing + andrew supergroup 420 - 5245793a-984b-4264-8d7c-7890775547a0 - 41 + ff07f00d-efa9-4b76-a064-63604cd3286e + 40 @@ -544,8 +544,8 @@ 0 /file_concat_0 1 - 1374817864947 - 1374817864929 + 1380323217529 + 1380323217436 512 @@ -565,7 +565,7 @@ 1006 - jing + andrew supergroup 420 @@ -579,18 +579,18 @@ 16391 /file_concat_1 1 - 1374817864950 - 1374817864950 + 1380323217542 + 1380323217542 512 - DFSClient_NONMAPREDUCE_-1676409172_1 + DFSClient_NONMAPREDUCE_1160098410_1 127.0.0.1 - jing + andrew supergroup 420 - 5245793a-984b-4264-8d7c-7890775547a0 - 53 + ff07f00d-efa9-4b76-a064-63604cd3286e + 52 @@ -700,8 +700,8 @@ 0 /file_concat_1 1 - 1374817864966 - 1374817864950 + 1380323217613 + 1380323217542 512 @@ -721,7 +721,7 @@ 1009 - jing + andrew supergroup 420 @@ -733,13 +733,13 @@ 56 0 /file_concat_target - 1374817864967 + 1380323217627 /file_concat_0 /file_concat_1 - 5245793a-984b-4264-8d7c-7890775547a0 - 64 + ff07f00d-efa9-4b76-a064-63604cd3286e + 63 @@ -750,15 +750,15 @@ 16392 /file_symlink /file_concat_target - 1374817864971 - 1374817864971 + 1380323217643 + 1380323217643 - jing + andrew supergroup 511 - 5245793a-984b-4264-8d7c-7890775547a0 - 65 + ff07f00d-efa9-4b76-a064-63604cd3286e + 64 @@ -768,14 +768,14 @@ HDFS_DELEGATION_TOKEN 1 - jing + andrew JobTracker - 1374817864974 - 1375422664974 + 1380323217655 + 1380928017655 2 - 1374904264974 + 1380409617655 @@ -785,14 +785,14 @@ HDFS_DELEGATION_TOKEN 1 - jing + andrew JobTracker - 1374817864974 - 1375422664974 + 1380323217655 + 1380928017655 2 - 1374904265012 + 1380409617701 @@ -802,55 +802,101 @@ HDFS_DELEGATION_TOKEN 1 - jing + andrew JobTracker - 1374817864974 - 1375422664974 + 1380323217655 + 1380928017655 2 - OP_ADD + OP_ADD_CACHE_POOL 61 + poolparty + + andrew + andrew + 493 + + 100 + + + + OP_MODIFY_CACHE_POOL + + 62 + poolparty + carlton + party + 448 + 1989 + + + + OP_ADD_PATH_BASED_CACHE_DIRECTIVE + + 63 + /bar + poolparty + + 
+ + OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR + + 64 + 1 + + + + OP_REMOVE_CACHE_POOL + + 65 + poolparty + + + + OP_ADD + + 66 0 16393 /hard-lease-recovery-test 1 - 1374817865017 - 1374817865017 + 1380323217822 + 1380323217822 512 - DFSClient_NONMAPREDUCE_-1676409172_1 + DFSClient_NONMAPREDUCE_1160098410_1 127.0.0.1 - jing + andrew supergroup 420 - 5245793a-984b-4264-8d7c-7890775547a0 - 69 + ff07f00d-efa9-4b76-a064-63604cd3286e + 73 OP_ALLOCATE_BLOCK_ID - 62 + 67 1073741834 OP_SET_GENSTAMP_V2 - 63 + 68 1010 OP_UPDATE_BLOCKS - 64 + 69 /hard-lease-recovery-test 1073741834 @@ -864,7 +910,7 @@ OP_UPDATE_BLOCKS - 65 + 70 /hard-lease-recovery-test 1073741834 @@ -878,15 +924,31 @@ OP_SET_GENSTAMP_V2 - 66 + 71 1011 OP_REASSIGN_LEASE - 67 - DFSClient_NONMAPREDUCE_-1676409172_1 + 72 + DFSClient_NONMAPREDUCE_1160098410_1 + /hard-lease-recovery-test + HDFS_NameNode + + + + OP_SET_GENSTAMP_V2 + + 73 + 1012 + + + + OP_REASSIGN_LEASE + + 74 + HDFS_NameNode /hard-lease-recovery-test HDFS_NameNode @@ -894,23 +956,23 @@ OP_CLOSE - 68 + 75 0 0 /hard-lease-recovery-test 1 - 1374817867688 - 1374817865017 + 1380323222701 + 1380323217822 512 1073741834 11 - 1011 + 1012 - jing + andrew supergroup 420 @@ -919,7 +981,7 @@ OP_END_LOG_SEGMENT - 69 + 76 From eb2175db1a99348c80457e3ffda172cc461de8bc Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 4 Oct 2013 22:28:23 +0000 Subject: [PATCH 29/51] HDFS-5190. Move cache pool related CLI commands to CacheAdmin. (Contributed by Andrew Wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1529334 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 + .../org/apache/hadoop/hdfs/DFSClient.java | 4 +- .../hadoop/hdfs/DistributedFileSystem.java | 2 +- .../hdfs/server/namenode/CacheManager.java | 2 +- .../apache/hadoop/hdfs/tools/CacheAdmin.java | 542 +++++++++++++++--- .../apache/hadoop/hdfs/tools/DFSAdmin.java | 311 ---------- .../hadoop/hdfs/tools/TableListing.java | 225 ++++++-- .../apache/hadoop/cli/TestCacheAdminCLI.java | 141 +++++ .../hadoop/cli/util/CLICommandCacheAdmin.java | 21 + .../cli/util/CacheAdminCmdExecutor.java | 37 ++ .../src/test/resources/testCacheAdminConf.xml | 211 +++++++ .../src/test/resources/testHDFSConf.xml | 65 --- 12 files changed, 1071 insertions(+), 493 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CLICommandCacheAdmin.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CacheAdminCmdExecutor.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index c9b84d9aa66..afd3afe92cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -51,6 +51,9 @@ HDFS-4949 (Unreleased) HDFS-5119. Persist CacheManager state in the edit log. (Contributed by Andrew Wang) + HDFS-5190. Move cache pool related CLI commands to CacheAdmin. 
+ (Contributed by Andrew Wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 8ea0939f9de..c37d86d7c51 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -2294,11 +2294,11 @@ public PathBasedCacheDescriptor addPathBasedCacheDirective( } } - public void removePathBasedCacheDescriptor(PathBasedCacheDescriptor descriptor) + public void removePathBasedCacheDescriptor(long id) throws IOException { checkOpen(); try { - namenode.removePathBasedCacheDescriptor(descriptor.getEntryId()); + namenode.removePathBasedCacheDescriptor(id); } catch (RemoteException re) { throw re.unwrapRemoteException(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 11d5fb05bed..2ece7640a77 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -1602,7 +1602,7 @@ public PathBasedCacheDescriptor addPathBasedCacheDirective( */ public void removePathBasedCacheDescriptor(PathBasedCacheDescriptor descriptor) throws IOException { - dfs.removePathBasedCacheDescriptor(descriptor); + dfs.removePathBasedCacheDescriptor(descriptor.getEntryId()); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index 9e1000934c3..bb5e07848b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -469,7 +469,7 @@ public synchronized void removeCachePool(String poolName) while (iter.hasNext()) { Entry entry = iter.next(); if (entry.getValue().getPool() == pool) { - entriesById.remove(entry.getValue().getEntryId()); + entriesByPath.remove(entry.getValue().getPath()); iter.remove(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java index 0ba9023d923..f0a71c595b2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java @@ -21,24 +21,76 @@ import java.util.LinkedList; import java.util.List; +import org.apache.commons.lang.WordUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; import 
org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; +import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException; +import org.apache.hadoop.hdfs.server.namenode.CachePool; import org.apache.hadoop.hdfs.tools.TableListing.Justification; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Tool; + +import com.google.common.base.Joiner; /** * This class implements command-line operations on the HDFS Cache. */ @InterfaceAudience.Private -public class CacheAdmin { - private static Configuration conf = new Configuration(); +public class CacheAdmin extends Configured implements Tool { - private static DistributedFileSystem getDFS() throws IOException { + /** + * Maximum length for printed lines + */ + private static final int MAX_LINE_WIDTH = 80; + + public CacheAdmin() { + this(null); + } + + public CacheAdmin(Configuration conf) { + super(conf); + } + + @Override + public int run(String[] args) throws IOException { + if (args.length == 0) { + printUsage(false); + return 1; + } + Command command = determineCommand(args[0]); + if (command == null) { + System.err.println("Can't understand command '" + args[0] + "'"); + if (!args[0].startsWith("-")) { + System.err.println("Command names must start with dashes."); + } + printUsage(false); + return 1; + } + List argsList = new LinkedList(); + for (int j = 1; j < args.length; j++) { + argsList.add(args[j]); + } + return command.run(getConf(), argsList); + } + + public static void main(String[] argsArray) throws IOException { + CacheAdmin cacheAdmin = new CacheAdmin(new Configuration()); + System.exit(cacheAdmin.run(argsArray)); + } + + private static DistributedFileSystem getDFS(Configuration conf) + throws IOException { FileSystem fs = FileSystem.get(conf); if (!(fs instanceof DistributedFileSystem)) { throw new IllegalArgumentException("FileSystem " + fs.getUri() + @@ -47,37 +99,55 @@ private static DistributedFileSystem getDFS() throws IOException { return (DistributedFileSystem)fs; } + /** + * NN exceptions contain the stack trace as part of the exception message. + * When it's a known error, pretty-print the error and squish the stack trace. 
+ */ + private static String prettifyException(Exception e) { + return e.getClass().getSimpleName() + ": " + + e.getLocalizedMessage().split("\n")[0]; + } + + private static TableListing getOptionDescriptionListing() { + TableListing listing = new TableListing.Builder() + .addField("").addField("", true) + .wrapWidth(MAX_LINE_WIDTH).hideHeaders().build(); + return listing; + } + interface Command { String getName(); String getShortUsage(); String getLongUsage(); - int run(List args) throws IOException; + int run(Configuration conf, List args) throws IOException; } private static class AddPathBasedCacheDirectiveCommand implements Command { @Override public String getName() { - return "-addPath"; + return "-addDirective"; } @Override public String getShortUsage() { - return "[-addPath -path -pool ]\n"; + return "[" + getName() + " -path -pool ]\n"; } @Override public String getLongUsage() { - return getShortUsage() + - "Adds a new PathBasedCache directive.\n" + - " The new path to cache.\n" + - " Paths may be either directories or files.\n" + - " The pool which this directive will reside in.\n" + - " You must have write permission on the cache pool in order\n" + - " to add new entries to it.\n"; + TableListing listing = getOptionDescriptionListing(); + listing.addRow("", "A path to cache. The path can be " + + "a directory or a file."); + listing.addRow("", "The pool to which the directive will be " + + "added. You must have write permission on the cache pool " + + "in order to add new directives."); + return getShortUsage() + "\n" + + "Add a new PathBasedCache directive.\n\n" + + listing.toString(); } @Override - public int run(List args) throws IOException { + public int run(Configuration conf, List args) throws IOException { String path = StringUtils.popOptionWithArgument("-path", args); if (path == null) { System.err.println("You must specify a path with -path."); @@ -93,14 +163,20 @@ public int run(List args) throws IOException { return 1; } - DistributedFileSystem dfs = getDFS(); + DistributedFileSystem dfs = getDFS(conf); PathBasedCacheDirective directive = new PathBasedCacheDirective(path, poolName); - PathBasedCacheDescriptor descriptor = - dfs.addPathBasedCacheDirective(directive); - System.out.println("Added PathBasedCache entry " - + descriptor.getEntryId()); + try { + PathBasedCacheDescriptor descriptor = + dfs.addPathBasedCacheDirective(directive); + System.out.println("Added PathBasedCache entry " + + descriptor.getEntryId()); + } catch (AddPathBasedCacheDirectiveException e) { + System.err.println(prettifyException(e)); + return 2; + } + return 0; } } @@ -108,32 +184,41 @@ public int run(List args) throws IOException { private static class RemovePathBasedCacheDirectiveCommand implements Command { @Override public String getName() { - return "-removePath"; + return "-removeDirective"; } @Override public String getShortUsage() { - return "[-removePath ]\n"; + return "[" + getName() + " ]\n"; } @Override public String getLongUsage() { - return getShortUsage() + - "Remove a cache directive.\n" + - " The id of the cache directive to remove.\n" + - " You must have write permission on the pool where the\n" + - " directive resides in order to remove it. To see a list\n" + - " of PathBasedCache directive IDs, use the -list command.\n"; + TableListing listing = getOptionDescriptionListing(); + listing.addRow("", "The id of the cache directive to remove. " + + "You must have write permission on the pool of the " + + "directive in order to remove it. 
To see a list " + + "of PathBasedCache directive IDs, use the -list command."); + return getShortUsage() + "\n" + + "Remove a cache directive.\n\n" + + listing.toString(); } @Override - public int run(List args) throws IOException { + public int run(Configuration conf, List args) throws IOException { String idString= StringUtils.popFirstNonOption(args); if (idString == null) { System.err.println("You must specify a directive ID to remove."); return 1; } - long id = Long.valueOf(idString); + long id; + try { + id = Long.valueOf(idString); + } catch (NumberFormatException e) { + System.err.println("Invalid directive ID " + idString + ": expected " + + "a numeric value."); + return 1; + } if (id <= 0) { System.err.println("Invalid directive ID " + id + ": ids must " + "be greater than 0."); @@ -141,12 +226,17 @@ public int run(List args) throws IOException { } if (!args.isEmpty()) { System.err.println("Can't understand argument: " + args.get(0)); + System.err.println("Usage is " + getShortUsage()); return 1; } - DistributedFileSystem dfs = getDFS(); - dfs.removePathBasedCacheDescriptor(new PathBasedCacheDescriptor(id, null, - null)); - System.out.println("Removed PathBasedCache directive " + id); + DistributedFileSystem dfs = getDFS(conf); + try { + dfs.getClient().removePathBasedCacheDescriptor(id); + System.out.println("Removed PathBasedCache directive " + id); + } catch (RemovePathBasedCacheDescriptorException e) { + System.err.println(prettifyException(e)); + return 2; + } return 0; } } @@ -154,31 +244,30 @@ public int run(List args) throws IOException { private static class ListPathBasedCacheDirectiveCommand implements Command { @Override public String getName() { - return "-listPaths"; + return "-listDirectives"; } @Override public String getShortUsage() { - return "[-listPaths [-path ] [-pool ]]\n"; + return "[" + getName() + " [-path ] [-pool ]]\n"; } @Override public String getLongUsage() { - return getShortUsage() + - "List PathBasedCache directives.\n" + - " If a -path argument is given, we will list only\n" + - " PathBasedCache entries with this path.\n" + - " Note that if there is a PathBasedCache directive for \n" + - " in a cache pool that we don't have read access for, it\n" + - " not be listed. If there are unreadable cache pools, a\n" + - " message will be printed.\n" + - " may be incomplete.\n" + - " If a -pool argument is given, we will list only path\n" + - " cache entries in that pool.\n"; + TableListing listing = getOptionDescriptionListing(); + listing.addRow("", "List only " + + "PathBasedCache directives with this path. " + + "Note that if there is a PathBasedCache directive for " + + "in a cache pool that we don't have read access for, it " + + "will not be listed."); + listing.addRow("", "List only path cache directives in that pool."); + return getShortUsage() + "\n" + + "List PathBasedCache directives.\n\n" + + listing.toString(); } @Override - public int run(List args) throws IOException { + public int run(Configuration conf, List args) throws IOException { String pathFilter = StringUtils.popOptionWithArgument("-path", args); String poolFilter = StringUtils.popOptionWithArgument("-pool", args); if (!args.isEmpty()) { @@ -186,11 +275,11 @@ public int run(List args) throws IOException { return 1; } TableListing tableListing = new TableListing.Builder(). - addField("ID", Justification.RIGHT). + addField("ID", Justification.LEFT). addField("POOL", Justification.LEFT). addField("PATH", Justification.LEFT). 
build(); - DistributedFileSystem dfs = getDFS(); + DistributedFileSystem dfs = getDFS(conf); RemoteIterator iter = dfs.listPathBasedCacheDescriptors(poolFilter, pathFilter); int numEntries = 0; @@ -205,12 +294,325 @@ public int run(List args) throws IOException { System.out.print(String.format("Found %d entr%s\n", numEntries, numEntries == 1 ? "y" : "ies")); if (numEntries > 0) { - System.out.print(tableListing.build()); + System.out.print(tableListing); } return 0; } } + private static class AddCachePoolCommand implements Command { + + private static final String NAME = "-addPool"; + + @Override + public String getName() { + return NAME; + } + + @Override + public String getShortUsage() { + return "[" + NAME + " [-owner ] " + + "[-group ] [-mode ] [-weight ]]\n"; + } + + @Override + public String getLongUsage() { + TableListing listing = getOptionDescriptionListing(); + + listing.addRow("", "Name of the new pool."); + listing.addRow("", "Username of the owner of the pool. " + + "Defaults to the current user."); + listing.addRow("", "Group of the pool. " + + "Defaults to the primary group name of the current user."); + listing.addRow("", "UNIX-style permissions for the pool. " + + "Permissions are specified in octal, e.g. 0755. " + + "By default, this is set to " + String.format("0%03o", + FsPermission.getCachePoolDefault().toShort())); + listing.addRow("", "Weight of the pool. " + + "This is a relative measure of the importance of the pool used " + + "during cache resource management. By default, it is set to " + + CachePool.DEFAULT_WEIGHT); + + return getShortUsage() + "\n" + + "Add a new cache pool.\n\n" + + listing.toString(); + } + + @Override + public int run(Configuration conf, List args) throws IOException { + String owner = StringUtils.popOptionWithArgument("-owner", args); + if (owner == null) { + owner = UserGroupInformation.getCurrentUser().getShortUserName(); + } + String group = StringUtils.popOptionWithArgument("-group", args); + if (group == null) { + group = UserGroupInformation.getCurrentUser().getGroupNames()[0]; + } + String modeString = StringUtils.popOptionWithArgument("-mode", args); + int mode; + if (modeString == null) { + mode = FsPermission.getCachePoolDefault().toShort(); + } else { + mode = Integer.parseInt(modeString, 8); + } + String weightString = StringUtils.popOptionWithArgument("-weight", args); + int weight; + if (weightString == null) { + weight = CachePool.DEFAULT_WEIGHT; + } else { + weight = Integer.parseInt(weightString); + } + String name = StringUtils.popFirstNonOption(args); + if (name == null) { + System.err.println("You must specify a name when creating a " + + "cache pool."); + return 1; + } + if (!args.isEmpty()) { + System.err.print("Can't understand arguments: " + + Joiner.on(" ").join(args) + "\n"); + System.err.println("Usage is " + getShortUsage()); + return 1; + } + DistributedFileSystem dfs = getDFS(conf); + CachePoolInfo info = new CachePoolInfo(name). + setOwnerName(owner). + setGroupName(group). + setMode(new FsPermission((short)mode)). 
+ setWeight(weight); + try { + dfs.addCachePool(info); + } catch (IOException e) { + throw new RemoteException(e.getClass().getName(), e.getMessage()); + } + System.out.println("Successfully added cache pool " + name + "."); + return 0; + } + } + + private static class ModifyCachePoolCommand implements Command { + + @Override + public String getName() { + return "-modifyPool"; + } + + @Override + public String getShortUsage() { + return "[" + getName() + " [-owner ] " + + "[-group ] [-mode ] [-weight ]]\n"; + } + + @Override + public String getLongUsage() { + TableListing listing = getOptionDescriptionListing(); + + listing.addRow("", "Name of the pool to modify."); + listing.addRow("", "Username of the owner of the pool"); + listing.addRow("", "Groupname of the group of the pool."); + listing.addRow("", "Unix-style permissions of the pool in octal."); + listing.addRow("", "Weight of the pool."); + + return getShortUsage() + "\n" + + WordUtils.wrap("Modifies the metadata of an existing cache pool. " + + "See usage of " + AddCachePoolCommand.NAME + " for more details", + MAX_LINE_WIDTH) + "\n\n" + + listing.toString(); + } + + @Override + public int run(Configuration conf, List args) throws IOException { + String owner = StringUtils.popOptionWithArgument("-owner", args); + String group = StringUtils.popOptionWithArgument("-group", args); + String modeString = StringUtils.popOptionWithArgument("-mode", args); + Integer mode = (modeString == null) ? + null : Integer.parseInt(modeString, 8); + String weightString = StringUtils.popOptionWithArgument("-weight", args); + Integer weight = (weightString == null) ? + null : Integer.parseInt(weightString); + String name = StringUtils.popFirstNonOption(args); + if (name == null) { + System.err.println("You must specify a name when creating a " + + "cache pool."); + return 1; + } + if (!args.isEmpty()) { + System.err.print("Can't understand arguments: " + + Joiner.on(" ").join(args) + "\n"); + System.err.println("Usage is " + getShortUsage()); + return 1; + } + boolean changed = false; + CachePoolInfo info = new CachePoolInfo(name); + if (owner != null) { + info.setOwnerName(owner); + changed = true; + } + if (group != null) { + info.setGroupName(group); + changed = true; + } + if (mode != null) { + info.setMode(new FsPermission(mode.shortValue())); + changed = true; + } + if (weight != null) { + info.setWeight(weight); + changed = true; + } + if (!changed) { + System.err.println("You must specify at least one attribute to " + + "change in the cache pool."); + return 1; + } + DistributedFileSystem dfs = getDFS(conf); + try { + dfs.modifyCachePool(info); + } catch (IOException e) { + throw new RemoteException(e.getClass().getName(), e.getMessage()); + } + System.out.print("Successfully modified cache pool " + name); + String prefix = " to have "; + if (owner != null) { + System.out.print(prefix + "owner name " + owner); + prefix = " and "; + } + if (group != null) { + System.out.print(prefix + "group name " + group); + prefix = " and "; + } + if (mode != null) { + System.out.print(prefix + "mode " + new FsPermission(mode.shortValue())); + prefix = " and "; + } + if (weight != null) { + System.out.print(prefix + "weight " + weight); + prefix = " and "; + } + System.out.print("\n"); + return 0; + } + } + + private static class RemoveCachePoolCommand implements Command { + + @Override + public String getName() { + return "-removePool"; + } + + @Override + public String getShortUsage() { + return "[" + getName() + " ]\n"; + } + + @Override + public String 
getLongUsage() { + return getShortUsage() + "\n" + + WordUtils.wrap("Remove a cache pool. This also uncaches paths " + + "associated with the pool.\n\n", MAX_LINE_WIDTH) + + " Name of the cache pool to remove.\n"; + } + + @Override + public int run(Configuration conf, List args) throws IOException { + String name = StringUtils.popFirstNonOption(args); + if (name == null) { + System.err.println("You must specify a name when deleting a " + + "cache pool."); + return 1; + } + if (!args.isEmpty()) { + System.err.print("Can't understand arguments: " + + Joiner.on(" ").join(args) + "\n"); + System.err.println("Usage is " + getShortUsage()); + return 1; + } + DistributedFileSystem dfs = getDFS(conf); + try { + dfs.removeCachePool(name); + } catch (IOException e) { + throw new RemoteException(e.getClass().getName(), e.getMessage()); + } + System.out.println("Successfully removed cache pool " + name + "."); + return 0; + } + } + + private static class ListCachePoolsCommand implements Command { + + @Override + public String getName() { + return "-listPools"; + } + + @Override + public String getShortUsage() { + return "[" + getName() + " [name]]\n"; + } + + @Override + public String getLongUsage() { + TableListing listing = getOptionDescriptionListing(); + listing.addRow("[name]", "If specified, list only the named cache pool."); + + return getShortUsage() + "\n" + + WordUtils.wrap("Display information about one or more cache pools, " + + "e.g. name, owner, group, permissions, etc.", MAX_LINE_WIDTH) + + "\n\n" + + listing.toString(); + } + + @Override + public int run(Configuration conf, List args) throws IOException { + String name = StringUtils.popFirstNonOption(args); + if (!args.isEmpty()) { + System.err.print("Can't understand arguments: " + + Joiner.on(" ").join(args) + "\n"); + System.err.println("Usage is " + getShortUsage()); + return 1; + } + DistributedFileSystem dfs = getDFS(conf); + TableListing listing = new TableListing.Builder(). + addField("NAME", Justification.LEFT). + addField("OWNER", Justification.LEFT). + addField("GROUP", Justification.LEFT). + addField("MODE", Justification.LEFT). + addField("WEIGHT", Justification.LEFT). + build(); + int numResults = 0; + try { + RemoteIterator iter = dfs.listCachePools(); + while (iter.hasNext()) { + CachePoolInfo info = iter.next(); + if (name == null || info.getPoolName().equals(name)) { + listing.addRow(new String[] { + info.getPoolName(), + info.getOwnerName(), + info.getGroupName(), + info.getMode().toString(), + info.getWeight().toString(), + }); + ++numResults; + if (name != null) { + break; + } + } + } + } catch (IOException e) { + throw new RemoteException(e.getClass().getName(), e.getMessage()); + } + System.out.print(String.format("Found %d result%s.\n", numResults, + (numResults == 1 ? "" : "s"))); + if (numResults > 0) { + System.out.print(listing); + } + // If there are no results, we return 1 (failure exit code); + // otherwise we return 0 (success exit code). + return (numResults == 0) ? 1 : 0; + } + } + private static class HelpCommand implements Command { @Override public String getName() { @@ -224,15 +626,17 @@ public String getShortUsage() { @Override public String getLongUsage() { - return getShortUsage() + - "Get detailed help about a command.\n" + - " The command to get detailed help for. If no " + - " command-name is specified, we will print detailed help " + - " about all commands"; + TableListing listing = getOptionDescriptionListing(); + listing.addRow("", "The command for which to get " + + "detailed help. 
If no command is specified, print detailed help for " + + "all commands"); + return getShortUsage() + "\n" + + "Get detailed help about a command.\n\n" + + listing.toString(); } @Override - public int run(List args) throws IOException { + public int run(Configuration conf, List args) throws IOException { if (args.size() == 0) { for (Command command : COMMANDS) { System.err.println(command.getLongUsage()); @@ -255,6 +659,7 @@ public int run(List args) throws IOException { System.err.print(separator + c.getName()); separator = ", "; } + System.err.print("\n"); return 1; } System.err.print(command.getLongUsage()); @@ -266,6 +671,10 @@ public int run(List args) throws IOException { new AddPathBasedCacheDirectiveCommand(), new RemovePathBasedCacheDirectiveCommand(), new ListPathBasedCacheDirectiveCommand(), + new AddCachePoolCommand(), + new ModifyCachePoolCommand(), + new RemoveCachePoolCommand(), + new ListCachePoolsCommand(), new HelpCommand(), }; @@ -290,25 +699,4 @@ private static Command determineCommand(String commandName) { } return null; } - - public static void main(String[] argsArray) throws IOException { - if (argsArray.length == 0) { - printUsage(false); - System.exit(1); - } - Command command = determineCommand(argsArray[0]); - if (command == null) { - System.err.println("Can't understand command '" + argsArray[0] + "'"); - if (!argsArray[0].startsWith("-")) { - System.err.println("Command names must start with dashes."); - } - printUsage(false); - System.exit(1); - } - List args = new LinkedList(); - for (int j = 1; j < argsArray.length; j++) { - args.add(argsArray[j]); - } - System.exit(command.run(args)); - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index e4b3b8cb056..98691df6a57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -24,7 +24,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; -import java.util.LinkedList; import java.util.List; import java.util.TreeSet; @@ -37,8 +36,6 @@ import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.FsStatus; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.RemoteIterator; -import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.shell.Command; import org.apache.hadoop.fs.shell.CommandFormat; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -47,17 +44,14 @@ import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.NameNodeProxies; -import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.SnapshotException; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; -import org.apache.hadoop.hdfs.server.namenode.CachePool; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.TransferFsImage; -import org.apache.hadoop.hdfs.tools.TableListing.Justification; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RemoteException; import 
org.apache.hadoop.net.NetUtils; @@ -68,8 +62,6 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; -import com.google.common.base.Joiner; - /** * This class provides some DFS administrative access shell commands. */ @@ -463,230 +455,6 @@ public int saveNamespace() throws IOException { return exitCode; } - final private static String ADD_CACHE_POOL_USAGE = - "-addCachePool [-owner ] " + - "[-group ] [-mode ] [-weight ]"; - - public int addCachePool(String argsArray[], int idx) throws IOException { - List args= new LinkedList(); - for (int i = idx; i < argsArray.length; i++) { - args.add(argsArray[i]); - } - String owner = StringUtils.popOptionWithArgument("-owner", args); - if (owner == null) { - owner = UserGroupInformation.getCurrentUser().getShortUserName(); - } - String group = StringUtils.popOptionWithArgument("-group", args); - if (group == null) { - group = UserGroupInformation.getCurrentUser().getGroupNames()[0]; - } - String modeString = StringUtils.popOptionWithArgument("-mode", args); - int mode; - if (modeString == null) { - mode = FsPermission.getCachePoolDefault().toShort(); - } else { - mode = Integer.parseInt(modeString, 8); - } - String weightString = StringUtils.popOptionWithArgument("-weight", args); - int weight; - if (weightString == null) { - weight = CachePool.DEFAULT_WEIGHT; - } else { - weight = Integer.parseInt(weightString); - } - String name = StringUtils.popFirstNonOption(args); - if (name == null) { - System.err.println("You must specify a name when creating a " + - "cache pool."); - return 1; - } - if (!args.isEmpty()) { - System.err.print("Can't understand arguments: " + - Joiner.on(" ").join(args) + "\n"); - System.err.println("Usage is " + ADD_CACHE_POOL_USAGE); - return 1; - } - DistributedFileSystem dfs = getDFS(); - CachePoolInfo info = new CachePoolInfo(name). - setOwnerName(owner). - setGroupName(group). - setMode(new FsPermission((short)mode)). - setWeight(weight); - try { - dfs.addCachePool(info); - } catch (IOException e) { - throw new RemoteException(e.getClass().getName(), e.getMessage()); - } - System.out.println("Successfully added cache pool " + name + "."); - return 0; - } - - final private static String MODIFY_CACHE_POOL_USAGE = - "-modifyCachePool [-owner ] " + - "[-group ] [-mode ] [-weight ]"; - - public int modifyCachePool(String argsArray[], int idx) throws IOException { - List args = new LinkedList(); - for (int i = idx; i < argsArray.length; i++) { - args.add(argsArray[i]); - } - String owner = StringUtils.popOptionWithArgument("-owner", args); - String group = StringUtils.popOptionWithArgument("-group", args); - String modeString = StringUtils.popOptionWithArgument("-mode", args); - Integer mode = (modeString == null) ? - null : Integer.parseInt(modeString, 8); - String weightString = StringUtils.popOptionWithArgument("-weight", args); - Integer weight = (weightString == null) ? 
- null : Integer.parseInt(weightString); - String name = StringUtils.popFirstNonOption(args); - if (name == null) { - System.err.println("You must specify a name when creating a " + - "cache pool."); - return 1; - } - if (!args.isEmpty()) { - System.err.print("Can't understand arguments: " + - Joiner.on(" ").join(args) + "\n"); - System.err.println("usage is " + MODIFY_CACHE_POOL_USAGE); - return 1; - } - boolean changed = false; - CachePoolInfo info = new CachePoolInfo(name); - if (owner != null) { - info.setOwnerName(owner); - changed = true; - } - if (group != null) { - info.setGroupName(group); - changed = true; - } - if (mode != null) { - info.setMode(new FsPermission(mode.shortValue())); - changed = true; - } - if (weight != null) { - info.setWeight(weight); - changed = true; - } - if (!changed) { - System.err.println("You must specify at least one attribute to " + - "change in the cache pool."); - return 1; - } - DistributedFileSystem dfs = getDFS(); - try { - dfs.modifyCachePool(info); - } catch (IOException e) { - throw new RemoteException(e.getClass().getName(), e.getMessage()); - } - System.out.print("Successfully modified cache pool " + name); - String prefix = " to have "; - if (owner != null) { - System.out.print(prefix + "owner name " + owner); - prefix = "and "; - } - if (group != null) { - System.out.print(prefix + "group name " + group); - prefix = "and "; - } - if (mode != null) { - System.out.print(prefix + "mode " + new FsPermission(mode.shortValue())); - prefix = "and "; - } - if (weight != null) { - System.out.print(prefix + "weight " + weight); - prefix = "and "; - } - System.out.print("\n"); - return 0; - } - - final private static String REMOVE_CACHE_POOL_USAGE = - "-removeCachePool "; - - public int removeCachePool(String argsArray[], int idx) throws IOException { - List args = new LinkedList(); - for (int i = idx; i < argsArray.length; i++) { - args.add(argsArray[i]); - } - String name = StringUtils.popFirstNonOption(args); - if (name == null) { - System.err.println("You must specify a name when deleting a " + - "cache pool."); - return 1; - } - if (!args.isEmpty()) { - System.err.print("Can't understand arguments: " + - Joiner.on(" ").join(args) + "\n"); - System.err.println("Usage is " + REMOVE_CACHE_POOL_USAGE); - return 1; - } - DistributedFileSystem dfs = getDFS(); - try { - dfs.removeCachePool(name); - } catch (IOException e) { - dfs.removeCachePool(name); - throw new RemoteException(e.getClass().getName(), e.getMessage()); - } - System.out.println("Successfully removed cache pool " + name + "."); - return 0; - } - - final private static String LIST_CACHE_POOLS_USAGE = - "-listCachePools] [-verbose] [name]"; - - public int listCachePools(String argsArray[], int idx) throws IOException { - List args = new LinkedList(); - for (int i = idx; i < argsArray.length; i++) { - args.add(argsArray[i]); - } - String name = StringUtils.popFirstNonOption(args); - if (!args.isEmpty()) { - System.err.print("Can't understand arguments: " + - Joiner.on(" ").join(args) + "\n"); - System.err.println("usage is " + LIST_CACHE_POOLS_USAGE); - return 1; - } - DistributedFileSystem dfs = getDFS(); - TableListing listing = new TableListing.Builder(). - addField("NAME", Justification.LEFT). - addField("OWNER", Justification.LEFT). - addField("GROUP", Justification.LEFT). - addField("MODE", Justification.LEFT). - addField("WEIGHT", Justification.RIGHT). 
- build(); - int numResults = 0; - try { - RemoteIterator iter = dfs.listCachePools(); - while (iter.hasNext()) { - CachePoolInfo info = iter.next(); - if (name == null || info.getPoolName().equals(name)) { - listing.addRow(new String[] { - info.getPoolName(), - info.getOwnerName(), - info.getGroupName(), - info.getMode().toString(), - info.getWeight().toString(), - }); - ++numResults; - if (name != null) { - break; - } - } - } - } catch (IOException e) { - throw new RemoteException(e.getClass().getName(), e.getMessage()); - } - System.out.print(String.format("Found %d result%s.\n", numResults, - (numResults == 1 ? "" : "s"))); - if (numResults > 0) { - System.out.print(listing.build()); - } - // If there are no results, we return 1 (failure exit code); - // otherwise we return 0 (success exit code). - return (numResults == 0) ? 1 : 0; - } - public int rollEdits() throws IOException { DistributedFileSystem dfs = getDFS(); long txid = dfs.rollEdits(); @@ -814,10 +582,6 @@ private void printHelp(String cmd) { "\t[-fetchImage ]\n" + "\t[-allowSnapshot ]\n" + "\t[-disallowSnapshot ]\n" + - "\t[" + ADD_CACHE_POOL_USAGE + "]\n" + - "\t[" + MODIFY_CACHE_POOL_USAGE + "]\n" + - "\t[" + REMOVE_CACHE_POOL_USAGE + "]\n" + - "\t[" + LIST_CACHE_POOLS_USAGE + "]\n" + "\t[-help [cmd]]\n"; String report ="-report: \tReports basic filesystem information and statistics.\n"; @@ -915,42 +679,6 @@ private void printHelp(String cmd) { String disallowSnapshot = "-disallowSnapshot :\n" + "\tDo not allow snapshots to be taken on a directory any more.\n"; - String addCachePool = ADD_CACHE_POOL_USAGE + ": \n" + - "\tAdd a new cache pool.\n" + - "\t is the name of the new pool. It must not already be used.\n" + - "\t is the owner of the pool. It defaults to the current\n" + - "\tuser name.\n" + - "\t is the group of the pool. It defaults to the primary\n" + - "\tgroup name of the current user.\n" + - "\t is the mode of the pool. This is a UNIX-style numeric mode\n" + - "\targument, supplied as an octal number. For example, mode 0755\n" + - "\tgrants the owner all permissions, and grants everyone else\n" + - "\tonly read and list permissions.\n" + - "\tThe mode defaults to " + - String.format("0%03o", - FsPermission.getCachePoolDefault().toShort()) + "\n" + - "\t is the weight of the pool. This determines what share \n" + - "\tof cluster resources the pool will get. It defaults to " + - CachePool.DEFAULT_WEIGHT + "\n"; - - String modifyCachePool = MODIFY_CACHE_POOL_USAGE + ": \n" + - "\tAdd a new cache pool with the given name.\n" + - "\t is the name of the pool to modify.\n" + - "\t is the new owner of the pool.\n" + - "\t is the new group of the pool.\n" + - "\t is the new mode of the pool.\n" + - "\t is the new weight of the pool.\n"; - - String removeCachePool = REMOVE_CACHE_POOL_USAGE + ": \n" + - "\tRemove a cache pool.\n" + - "\t is the name of the pool to remove.\n"; - - String listCachePools = " -listCachePools [-name ] [-verbose]\n" + - "\tList cache pools.\n" + - "\tIf is specified, we will list only the cache pool with\n" + - "\tthat name. 
If is specified, we will list detailed\n" + - "\tinformation about each pool\n"; - String help = "-help [cmd]: \tDisplays help for the given command or all commands if none\n" + "\t\tis specified.\n"; @@ -998,14 +726,6 @@ private void printHelp(String cmd) { System.out.println(allowSnapshot); } else if ("disallowSnapshot".equalsIgnoreCase(cmd)) { System.out.println(disallowSnapshot); - } else if ("addCachePool".equalsIgnoreCase(cmd)) { - System.out.println(addCachePool); - } else if ("modifyCachePool".equalsIgnoreCase(cmd)) { - System.out.println(modifyCachePool); - } else if ("removeCachePool".equalsIgnoreCase(cmd)) { - System.out.println(removeCachePool); - } else if ("listCachePools".equalsIgnoreCase(cmd)) { - System.out.println(listCachePools); } else if ("help".equals(cmd)) { System.out.println(help); } else { @@ -1032,13 +752,6 @@ private void printHelp(String cmd) { System.out.println(fetchImage); System.out.println(allowSnapshot); System.out.println(disallowSnapshot); - System.out.println(addCachePool); - System.out.println(modifyCachePool); - System.out.println(removeCachePool); - System.out.println(listCachePools); - - System.out.println(disallowSnapshot); - System.out.println(help); System.out.println(); ToolRunner.printGenericCommandUsage(System.out); @@ -1275,18 +988,6 @@ private static void printUsage(String cmd) { } else if ("-fetchImage".equals(cmd)) { System.err.println("Usage: java DFSAdmin" + " [-fetchImage ]"); - } else if ("-addCachePool".equals(cmd)) { - System.err.println("Usage: java DFSAdmin" - + " [" + ADD_CACHE_POOL_USAGE + "]"); - } else if ("-modifyCachePool".equals(cmd)) { - System.err.println("Usage: java DFSAdmin" - + " [" + MODIFY_CACHE_POOL_USAGE + "]"); - } else if ("-removeCachePool".equals(cmd)) { - System.err.println("Usage: java DFSAdmin" - + " [" + REMOVE_CACHE_POOL_USAGE + "]"); - } else if ("-listCachePools".equals(cmd)) { - System.err.println("Usage: java DFSAdmin" - + " [" + LIST_CACHE_POOLS_USAGE + "]"); } else { System.err.println("Usage: java DFSAdmin"); System.err.println("Note: Administrative commands can only be run as the HDFS superuser."); @@ -1312,10 +1013,6 @@ private static void printUsage(String cmd) { System.err.println(" ["+ClearSpaceQuotaCommand.USAGE+"]"); System.err.println(" [-setBalancerBandwidth ]"); System.err.println(" [-fetchImage ]"); - System.err.println(" [" + ADD_CACHE_POOL_USAGE + "]"); - System.err.println(" [" + MODIFY_CACHE_POOL_USAGE + "]"); - System.err.println(" [" + REMOVE_CACHE_POOL_USAGE + "]"); - System.err.println(" [" + LIST_CACHE_POOLS_USAGE + "]"); System.err.println(" [-help [cmd]]"); System.err.println(); ToolRunner.printGenericCommandUsage(System.err); @@ -1488,14 +1185,6 @@ public int run(String[] argv) throws Exception { exitCode = setBalancerBandwidth(argv, i); } else if ("-fetchImage".equals(cmd)) { exitCode = fetchImage(argv, i); - } else if ("-addCachePool".equals(cmd)) { - exitCode = addCachePool(argv, i); - } else if ("-modifyCachePool".equals(cmd)) { - exitCode = modifyCachePool(argv, i); - } else if ("-removeCachePool".equals(cmd)) { - exitCode = removeCachePool(argv, i); - } else if ("-listCachePools".equals(cmd)) { - exitCode = listCachePools(argv, i); } else if ("-help".equals(cmd)) { if (i < argv.length) { printHelp(argv[i]); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java index aded360a428..857111d551e 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java @@ -17,13 +17,23 @@ */ package org.apache.hadoop.hdfs.tools; +import java.util.ArrayList; import java.util.LinkedList; import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang.WordUtils; import org.apache.hadoop.classification.InterfaceAudience; /** * This class implements a "table listing" with column headers. + * + * Example: + * + * NAME OWNER GROUP MODE WEIGHT + * pool1 andrew andrew rwxr-xr-x 100 + * pool2 andrew andrew rwxr-xr-x 100 + * pool3 andrew andrew rwxr-xr-x 100 + * */ @InterfaceAudience.Private public class TableListing { @@ -33,39 +43,80 @@ public enum Justification { } private static class Column { - private final LinkedList rows; + private final ArrayList rows; private final Justification justification; - private int maxLength; + private final boolean wrap; - Column(String title, Justification justification) { - this.rows = new LinkedList(); + private int wrapWidth = Integer.MAX_VALUE; + private int maxWidth; + + Column(String title, Justification justification, boolean wrap) { + this.rows = new ArrayList(); this.justification = justification; - this.maxLength = 0; + this.wrap = wrap; + this.maxWidth = 0; addRow(title); } private void addRow(String val) { - if ((val.length() + 1) > maxLength) { - maxLength = val.length() + 1; + if ((val.length() + 1) > maxWidth) { + maxWidth = val.length() + 1; + } + // Ceiling at wrapWidth, because it'll get wrapped + if (maxWidth > wrapWidth) { + maxWidth = wrapWidth; } rows.add(val); } - String getRow(int i) { - String raw = rows.get(i); - int paddingLength = maxLength - raw.length(); - String padding = (paddingLength <= 0) ? "" : - StringUtils.repeat(" ", paddingLength); - if (justification == Justification.LEFT) { - return raw + padding; - } else { - return padding + raw; + private int getMaxWidth() { + return maxWidth; + } + + private void setWrapWidth(int width) { + wrapWidth = width; + // Ceiling the maxLength at wrapWidth + if (maxWidth > wrapWidth) { + maxWidth = wrapWidth; } + // Else we need to traverse through and find the real maxWidth + else { + maxWidth = 0; + for (int i=0; i maxWidth) { + maxWidth = length; + } + } + } + } + + /** + * Return the ith row of the column as a set of wrapped strings, each at + * most wrapWidth in length. + */ + String[] getRow(int idx) { + String raw = rows.get(idx); + // Line-wrap if it's too long + String[] lines = new String[] {raw}; + if (wrap) { + lines = WordUtils.wrap(lines[0], wrapWidth, "\n", true).split("\n"); + } + for (int i=0; i columns = new LinkedList(); + private boolean showHeader = true; + private int wrapWidth = Integer.MAX_VALUE; /** * Create a new Builder. @@ -74,14 +125,63 @@ public Builder() { } /** - * Add a new field to the Table under construction. - * - * @param title Field title. - * @param leftJustified Whether or not the field is left justified. - * @return this. 
+ * See {@link #addField(String, Justification, boolean) + */ + public Builder addField(String title) { + return addField(title, Justification.LEFT, false); + } + + /** + * See {@link #addField(String, Justification, boolean) */ public Builder addField(String title, Justification justification) { - columns.add(new Column(title, justification)); + return addField(title, justification, false); + } + + /** + * See {@link #addField(String, Justification, boolean) + */ + public Builder addField(String title, boolean wrap) { + return addField(title, Justification.LEFT, wrap); + } + + /** + * Add a new field to the Table under construction. + * + * @param title Field title. + * @param justification Right or left justification. Defaults to left. + * @Param wrapWidth Width at which to auto-wrap the content of the cell. + * Defaults to Integer.MAX_VALUE. + * @return This Builder object + */ + public Builder addField(String title, Justification justification, + boolean wrap) { + columns.add(new Column(title, justification, wrap)); + return this; + } + + /** + * Whether to hide column headers in table output + */ + public Builder hideHeaders() { + this.showHeader = false; + return this; + } + + /** + * Whether to show column headers in table output. This is the default. + */ + public Builder showHeaders() { + this.showHeader = true; + return this; + } + + /** + * Set the maximum width of a row in the TableListing. Must have one or + * more wrappable fields for this to take effect. + */ + public Builder wrapWidth(int width) { + this.wrapWidth = width; return this; } @@ -89,17 +189,22 @@ public Builder addField(String title, Justification justification) { * Create a new TableListing. */ public TableListing build() { - return new TableListing(columns.toArray(new Column[0])); + return new TableListing(columns.toArray(new Column[0]), showHeader, + wrapWidth); } } private final Column columns[]; private int numRows; + private boolean showHeader; + private int wrapWidth; - TableListing(Column columns[]) { + TableListing(Column columns[], boolean showHeader, int wrapWidth) { this.columns = columns; this.numRows = 0; + this.showHeader = showHeader; + this.wrapWidth = wrapWidth; } /** @@ -107,7 +212,7 @@ public TableListing build() { * * @param row The row of objects to add-- one per column. */ - public void addRow(String row[]) { + public void addRow(String... row) { if (row.length != columns.length) { throw new RuntimeException("trying to add a row with " + row.length + " columns, but we have " + columns.length + " columns."); @@ -118,19 +223,67 @@ public void addRow(String row[]) { numRows++; } - /** - * Convert the table to a string. 
- */ - public String build() { + @Override + public String toString() { StringBuilder builder = new StringBuilder(); - for (int i = 0; i < numRows + 1; i++) { - String prefix = ""; - for (int j = 0; j < columns.length; j++) { - builder.append(prefix); - prefix = " "; - builder.append(columns[j].getRow(i)); + // Calculate the widths of each column based on their maxWidths and + // the wrapWidth for the entire table + int width = (columns.length-1)*2; // inter-column padding + for (int i=0; i wrapWidth) { + boolean modified = false; + for (int i=0; i 4) { + column.setWrapWidth(maxWidth-1); + modified = true; + width -= 1; + if (width <= wrapWidth) { + break; + } + } + } + } + if (!modified) { + break; + } + } + + int startrow = 0; + if (!showHeader) { + startrow = 1; + } + String[][] columnLines = new String[columns.length][]; + for (int i = startrow; i < numRows + 1; i++) { + int maxColumnLines = 0; + for (int j = 0; j < columns.length; j++) { + columnLines[j] = columns[j].getRow(i); + if (columnLines[j].length > maxColumnLines) { + maxColumnLines = columnLines[j].length; + } + } + + for (int c = 0; c < maxColumnLines; c++) { + // First column gets no left-padding + String prefix = ""; + for (int j = 0; j < columns.length; j++) { + // Prepend padding + builder.append(prefix); + prefix = " "; + if (columnLines[j].length > c) { + builder.append(columnLines[j][c]); + } else { + builder.append(StringUtils.repeat(" ", columns[j].maxWidth)); + } + } + builder.append("\n"); } - builder.append("\n"); } return builder.toString(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java new file mode 100644 index 00000000000..f25c4fe01ab --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java @@ -0,0 +1,141 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
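Pulling the TableListing changes above together, a hedged usage sketch of the reworked builder; the column names and rows are illustrative, and rendering now goes through toString() rather than the removed build() method:

    import org.apache.hadoop.hdfs.tools.TableListing;
    import org.apache.hadoop.hdfs.tools.TableListing.Justification;

    public class TableListingDemo {
      public static void main(String[] args) {
        TableListing listing = new TableListing.Builder()
            .addField("NAME", Justification.LEFT)
            .addField("OWNER")                      // defaults: LEFT, no wrapping
            .addField("DESCRIPTION", true)          // wrappable column
            .wrapWidth(60)                          // row cap; needs a wrappable field
            .build();
        listing.addRow("pool1", "andrew", "hot datasets cached on the datanodes");
        listing.addRow("pool2", "alice", "a second pool whose long description wraps");
        System.out.print(listing.toString());
      }
    }
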
+ */ + +package org.apache.hadoop.cli; + +import static org.junit.Assert.assertTrue; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.cli.util.CLICommand; +import org.apache.hadoop.cli.util.CLICommandCacheAdmin; +import org.apache.hadoop.cli.util.CLICommandTypes; +import org.apache.hadoop.cli.util.CLITestCmd; +import org.apache.hadoop.cli.util.CacheAdminCmdExecutor; +import org.apache.hadoop.cli.util.CommandExecutor; +import org.apache.hadoop.cli.util.CommandExecutor.Result; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.HDFSPolicyProvider; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.tools.CacheAdmin; +import org.apache.hadoop.security.authorize.PolicyProvider; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.xml.sax.SAXException; + +public class TestCacheAdminCLI extends CLITestHelper { + + public static final Log LOG = LogFactory.getLog(TestCacheAdminCLI.class); + + protected MiniDFSCluster dfsCluster = null; + protected FileSystem fs = null; + protected String namenode = null; + + @Before + @Override + public void setUp() throws Exception { + super.setUp(); + conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, + HDFSPolicyProvider.class, PolicyProvider.class); + + // Many of the tests expect a replication value of 1 in the output + conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1); + + dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); + + dfsCluster.waitClusterUp(); + namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///"); + username = System.getProperty("user.name"); + + fs = dfsCluster.getFileSystem(); + assertTrue("Not a HDFS: "+fs.getUri(), + fs instanceof DistributedFileSystem); + } + + @After + @Override + public void tearDown() throws Exception { + if (fs != null) { + fs.close(); + } + if (dfsCluster != null) { + dfsCluster.shutdown(); + } + Thread.sleep(2000); + super.tearDown(); + } + + @Override + protected String getTestFile() { + return "testCacheAdminConf.xml"; + } + + @Override + protected TestConfigFileParser getConfigParser() { + return new TestConfigFileParserCacheAdmin(); + } + + private class TestConfigFileParserCacheAdmin extends + CLITestHelper.TestConfigFileParser { + @Override + public void endElement(String uri, String localName, String qName) + throws SAXException { + if (qName.equals("cache-admin-command")) { + if (testCommands != null) { + testCommands.add(new CLITestCmdCacheAdmin(charString, + new CLICommandCacheAdmin())); + } else if (cleanupCommands != null) { + cleanupCommands.add(new CLITestCmdCacheAdmin(charString, + new CLICommandCacheAdmin())); + } + } else { + super.endElement(uri, localName, qName); + } + } + } + + private class CLITestCmdCacheAdmin extends CLITestCmd { + + public CLITestCmdCacheAdmin(String str, CLICommandTypes type) { + super(str, type); + } + + @Override + public CommandExecutor getExecutor(String tag) + throws IllegalArgumentException { + if (getType() instanceof CLICommandCacheAdmin) { + return new CacheAdminCmdExecutor(tag, new CacheAdmin(conf)); + } + return super.getExecutor(tag); + } + } + + @Override + protected Result execute(CLICommand cmd) throws Exception { + return cmd.getExecutor("").executeCommand(cmd.getCmd()); + } + + @Test + @Override + public void testAll () { + super.testAll(); + } +} diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CLICommandCacheAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CLICommandCacheAdmin.java new file mode 100644 index 00000000000..e9bf182c992 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CLICommandCacheAdmin.java @@ -0,0 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.cli.util; + +public class CLICommandCacheAdmin implements CLICommandTypes { +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CacheAdminCmdExecutor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CacheAdminCmdExecutor.java new file mode 100644 index 00000000000..922020faf84 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CacheAdminCmdExecutor.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.cli.util; + +import org.apache.hadoop.hdfs.tools.CacheAdmin; +import org.apache.hadoop.util.ToolRunner; + +public class CacheAdminCmdExecutor extends CommandExecutor { + protected String namenode = null; + protected CacheAdmin admin = null; + + public CacheAdminCmdExecutor(String namenode, CacheAdmin admin) { + this.namenode = namenode; + this.admin = admin; + } + + @Override + protected void execute(final String cmd) throws Exception { + String[] args = getCommandAsArgs(cmd, "NAMENODE", this.namenode); + ToolRunner.run(admin, args); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml new file mode 100644 index 00000000000..0153b72d7d7 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml @@ -0,0 +1,211 @@ + + + + + + + + test + + + + + + Testing basic usage + + + + + + + + SubstringComparator + Usage: bin/hdfs cacheadmin [COMMAND] + + + + + + Testing listing no cache pools + + -listPools + + + + + + SubstringComparator + Found 0 results. + + + + + + Testing adding a cache pool + + -addPool foo + + + -removePool foo + + + + SubstringComparator + Successfully added cache pool foo. + + + + + + Testing modifying a cache pool + + -addPool poolparty -owner alice -group alicegroup -mode 0000 -weight 50 + -modifyPool poolparty -owner bob -group bobgroup -mode 0777 -weight 51 + -listPools + + + -removePool poolparty + + + + SubstringComparator + poolparty bob bobgroup rwxrwxrwx 51 + + + + + + Testing deleting a cache pool + + -addPool foo + -removePool foo + + + + + + SubstringComparator + Successfully removed cache pool foo. 
+ + + + + + Testing listing all cache pools + + -addPool foo -owner bob -group bob -mode 0664 + -addPool bar -owner alice -group alicegroup -mode 0755 + -listPools + + + -removePool foo + -removePool bar + + + + SubstringComparator + Found 2 results. + + + SubstringComparator + bar alice alicegroup rwxr-xr-x 100 + + + SubstringComparator + foo bob bob rw-rw-r-- 100 + + + + + + Testing listing a single cache pool + + -addPool foo -owner bob -group bob -mode 0664 + -addPool bar -owner alice -group alicegroup -mode 0755 + -listPools foo + + + -removePool foo + -removePool bar + + + + SubstringComparator + Found 1 result. + + + SubstringComparator + foo bob bob rw-rw-r-- 100 + + + + + + Testing creating cache paths + + -addPool pool1 + -addPath -path /foo -pool pool1 + -addPath -path /bar -pool pool1 + -listPaths -pool pool1 + + + -removePool pool1 + + + + SubstringComparator + Found 2 entries + + + SubstringComparator + 1 pool1 /foo + + + SubstringComparator + 2 pool1 /bar + + + + + + Testing removing cache paths + + -addPool pool1 + -addPath -path /foo -pool pool1 + -addPath -path /bar -pool pool1 + -removePool pool1 + -listPaths -pool pool1 + + + + + + SubstringComparator + Found 0 entries + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml index 92cabe270bf..490885b6c23 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml @@ -16521,70 +16521,5 @@ - - - Testing listing no cache pools - - -fs NAMENODE -listCachePools - - - - - - SubstringComparator - Found 0 results. - - - - - - Testing adding a cache pool - - -fs NAMENODE -addCachePool foo - - - -fs NAMENODE -removeCachePool foo - - - - SubstringComparator - Successfully added cache pool foo. - - - - - - Testing deleting a cache pool - - -fs NAMENODE -addCachePool foo - -fs NAMENODE -removeCachePool foo - - - - - - SubstringComparator - Successfully removed cache pool foo. - - - - - - Testing listing a cache pool - - -fs NAMENODE -addCachePool foo -owner bob -group bob -mode 0664 - -fs NAMENODE -listCachePools foo - - - -fs NAMENODE -removeCachePool foo - - - - SubstringComparator - bob bob rw-rw-r-- 100 - - - - From 633b693517ec7882629074d78a55524392d88bd7 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Sun, 6 Oct 2013 17:06:03 +0000 Subject: [PATCH 30/51] HDFS-5309. Fix failing caching unit tests. (Andrew Wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1529646 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 2 + .../src/test/resources/editsStored | Bin 4210 -> 4493 bytes .../src/test/resources/editsStored.xml | 156 +++++++++--------- .../src/test/resources/testCacheAdminConf.xml | 12 +- 4 files changed, 86 insertions(+), 84 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index afd3afe92cb..1f915df4cc8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -72,3 +72,5 @@ HDFS-4949 (Unreleased) (Contributed by Andrew Wang) HDFS-5266. ElasticByteBufferPool#Key does not implement equals. (cnauroth) + + HDFS-5309. Fix failing caching unit tests. 
(Andrew Wang) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored index 111cec0bef7a2e46ca10ff777da1a0d273352731..2f98a18c32c04a622709aad28e3d2ae1bd2511cf 100644 GIT binary patch literal 4493 zcmcIn3rti;6uoaB?1!K7@ejI6{ej3&5JBr(`KX|Lttcql1zr(>UAnt~)TW6hV#O8{ zt4Y)NNh7h=C?%Lwt46U!txZ}Rt4U3)#@Hr;fcS|`+bErRGqbCYaf6B8P4>>4nS0Ki zd+*GfH#j)>>sTNnG>z`c8$};9LbG5xwl*P2zkaMW_s629+?ufB3N|Wg$mk+{TYCTW z&H0DFEg0C*OHILxG$aiKN55BDEt|{ivc)Nz>x7Is@v$HP2&U$UqFT4c1{%dmYq6Hw zE^hYnAoJmq=bYy=2$Sq z#?74_gMP6vvL$c#=I|U-(Uud@3oO=l8-h0ETx%;dC>#LZRD)*^_YBv=?I~B1(6SWK z@-cl$C~9o=ayf-<<2vg5bZ7VPBp3iX;MjF>?flMlbUYEM6MCX05k^LrSK8$=r_J7I zuBk$8N`8}RPQ?2*Og5bgqfmyS5ZUXuUY$sXeTEK~9GZ28S^EwhPQJNm>Ue7HH*^>b z4y&csvCih?Co*u-)BeIxYVYsa-USuM4O!5_wBwEm6e_?I6yH90-G+VGL~G3oRN#ppa>rgh51% z$(DSjIcEeS$|)rQh?@!u!WkLW;=P9UGfj-l$R$(GQ)I6q!)a1zMK|qY-=2K7(qAW(NwQE?e-G_f>{y|o`S&>Idh0jMdzTS=fGMOQ zX(52VrHPL*tOnP7N!-I4i{c*eU|-7%I^t2+w$F_U6F@lD5`b6d(%arFZ*P%7u{XWF zE`sH4s@i>)8ZT5G=t_bs;^duMnMPs^)3BD%72xUhT{jr=bTTOR`6f3uGUOR*WF-K| z)Jv&6<+_7`+$HejYGkUBK*3C`B>?TIN}uD5CxQ%${be6s-oWspmErAxH5TFu5B9axTjG=>F>ywP31BwX61s048c}$cWhP?60?LMiul>7C+7>O~&Vl4savo2k{&XD8Cpg7P{6||2b$E%T*03cJHQf~;G0J*d8 zcRh8R5qBe*ga0IS&#hnG0gNXBccNrI*z2Ejj`1X_c~}XkcwF^na5K*QIW(2k+gt*U z{$H#&cS7k+;)R5|$cVMFM(m<%d8+;a{4Ak$srn!A3S&=mM;E*0BykUPgrZUTJ0zpD z3|Dbq=BDV31Ac+T8JI`3qz8X;hB_K+sw=HkFQHbdyMn@3a27DgrCg-n_(^LJrA&66 zz?rtv6?RKm6}l9muQ9Z5oJh4L-gXKosDf!n?sv{x7b3-AYMAm0hWit8KYkWM4d)NX z&EbJ&!=kEtv~a=HYywdsaQ(@ zJ{xx~KgjlV8W|KH-nnt9kL~Mp4=Z7Zln*+j9+E>!f1-arGtGE4(_`_EK4Y#drmj3X zy7K&)(c}S^4DW>Y5PAP4=$qf1ZK;v7ZRK))mv#mtTuKQa|2MFG2P4dc7Y#hDM>TWB w*|VRr6AhhLR6KZwLWyhP{S;13xWi0ve2Eet^J*pUQp;xgH2}-*Ke~}4;{X5v literal 4210 zcmcInYfMx}6h6CmUG^c!!xt9uTGWcT!Xk=7wF8IW-CY*9u}y7! zL{o`P`C-#mNlRMdkA@OWQ#4wmjj0w*TN`R)w5F|91o4rW#?)?S?#%4kd#_uX=xnlQ z?wmQ_cjlZkbLWOaq3^~6ZMq#}Ppufk9`w51I#k%x`Ic#E$*aav;r)HbuML{GR?t)J zHq*Sbpa1l5?_b@iYt|W=DHv!6=^-ie6<34gsPRfpzvO7vEzX&32OWS8CGY%Jib!Ih zm#qw&T<$uHuenL`)_Fb6O}Y-4vS@KtQG-iz`yCa_DwY;3uPk1)s;Jmuv(L?)o0B!) zo}2BkgV~;)J1YzS>=0jB9L(ESURb+l%jUf;t4i82vgzfM%j?WC1;7X#48BU))38a)B2mVtUT-L$;^ap`+B5&$uvVHeP}vlbqGHy|)4OjIHP;xlVq zUa7|K@dh1@^|;T-gIub@J-B&2IzN#CnbiOln|5AEWonCBy`gjFF>Y;DtMe{zojQS8 zk5sF{>~lMtd>cJ}?Igx0AJ|=$!t7(C+lQgDH3OE=H@$n^GYl#&8kDwu;=Y9)easw2 z(GD_bXP*~*D~mdoq&?k&PijAMyowimJY~fHQFr9t;W=oVx;2gMaY%R!;=v8JKn!C| z)SROWZ7!%j!fF+gH0OZHEwwCbaZ9*xkB&2*j%A3WwP%AaBmaQ!CA6Zfwj&oT*PwFxx#;|CZ8Cs^yuneg+()Lo1p#?cNM5k~g7Yn*yMtC_JG5)y)5+4&+Gn?!%x(Nmm@*oTlJIxfS~Qrg)lK8 zTk@&?gb1|BClmq@wpLZ8aWW=_n=M_(c5pILlT1EIvAu$fWJ#tKBD5ve{z|^hCKI*X z_%_qb@f8eAUV!a%uDr4^<0E6ZOCm`Zsu=HQWc90R` z*sGa(m=!L-tIRnpNu$7O&?oa&PXB)WmzPAD2q29}1Yr28^y&znv?)|63>0@Y&)`X$ zs`Q@c#ehp=fR;nguUE=MY7EneMEJe`@M7B~j{F=|3WL_XV2~qES0c;n$EF6k>@YS3 zisb+Pg^VJR4C9tg{t$rih|BT?=b1qUab|ASY&qY?c`}qdJpT%waK#xTj6Kz+`@G_2 zQgHnLxP0Ez$Y+?0TFYxBE{2z<7#}3h2(L>qeoI{!x&8-bbQXY(c2JDMxyTjpK64IB z(x@Y9&?i%Db2f>Ut3{azU>1=G-?2JJt-j7vl}VMt(1wAYr94$xO7D5G#o%HaqQ#a! 
z6gVeRW3Us6@Qt(lo6{FLayC^8w_EEI_jBagN@RKc*fd5iHjGVyBB>vFv1pe_hH=Xw ze+a-FSkn!hXAT*}nY{C1?6MP_XReZm=U>4SE;e(7F}*jXh!43kl7d5y%y@bVPnx5+cY>r#y0QP)M~wnGMfwV9`RkYfzJ!dq27sn5MJn(uH_ zoxc_(P*-3+mB<9K^z`%v8yj5i`o~a{Q1n_EyjA6XNRzOjL+Xl6WQ2J@t;#&rE4|K| zdVCMUA6^(W4vPhCE8=uGoH~^HS$f>iP1nIBW|lu}vz(4Ihnq$-v*-WY?C|PqAFk~23ty|uOm$?219_HHGh!7pPLW`_7EE-}yd4Q5M2Cy#;UjyL8h}T6ykmZd&mFOar#BZ*W%% znJf4HIPvD4vGf6ohI!;giG1u5{N3!Ra5hR6o?7YLp6);SK3>E~Ad1ucKG4^xk+?`O aC=#x5(u8~Jgr2=Nc|E)OLh2 1 - 1381014414770 - 0ed3ccccde5c0830 + 1381693732152 + 6a186e50c85a8650 @@ -24,8 +24,8 @@ 3 2 - 1381014414779 - 1619312c238cd1b1 + 1381693732162 + ae45ee278252aab6 @@ -37,18 +37,18 @@ 16386 /file_create_u\0001;F431 1 - 1380323216882 - 1380323216882 + 1381002533488 + 1381002533488 512 - DFSClient_NONMAPREDUCE_1160098410_1 + DFSClient_NONMAPREDUCE_2001868554_1 127.0.0.1 andrew supergroup 420 - ff07f00d-efa9-4b76-a064-63604cd3286e - 7 + 8e519582-234f-415a-85b8-2f3d616eb06f + 9 @@ -59,8 +59,8 @@ 0 /file_create_u\0001;F431 1 - 1380323216937 - 1380323216882 + 1381002533532 + 1381002533488 512 @@ -78,9 +78,9 @@ 0 /file_create_u\0001;F431 /file_moved - 1380323216955 - ff07f00d-efa9-4b76-a064-63604cd3286e - 9 + 1381002533550 + 8e519582-234f-415a-85b8-2f3d616eb06f + 11 @@ -89,9 +89,9 @@ 7 0 /file_moved - 1380323216966 - ff07f00d-efa9-4b76-a064-63604cd3286e - 10 + 1381002533572 + 8e519582-234f-415a-85b8-2f3d616eb06f + 12 @@ -101,7 +101,7 @@ 0 16387 /directory_mkdir - 1380323216981 + 1381002533598 andrew supergroup @@ -136,8 +136,8 @@ 12 /directory_mkdir snapshot1 - ff07f00d-efa9-4b76-a064-63604cd3286e - 15 + 8e519582-234f-415a-85b8-2f3d616eb06f + 17 @@ -147,8 +147,8 @@ /directory_mkdir snapshot1 snapshot2 - ff07f00d-efa9-4b76-a064-63604cd3286e - 16 + 8e519582-234f-415a-85b8-2f3d616eb06f + 18 @@ -157,8 +157,8 @@ 14 /directory_mkdir snapshot2 - ff07f00d-efa9-4b76-a064-63604cd3286e - 17 + 8e519582-234f-415a-85b8-2f3d616eb06f + 19 @@ -169,18 +169,18 @@ 16388 /file_create_u\0001;F431 1 - 1380323217070 - 1380323217070 + 1381002533697 + 1381002533697 512 - DFSClient_NONMAPREDUCE_1160098410_1 + DFSClient_NONMAPREDUCE_2001868554_1 127.0.0.1 andrew supergroup 420 - ff07f00d-efa9-4b76-a064-63604cd3286e - 18 + 8e519582-234f-415a-85b8-2f3d616eb06f + 20 @@ -191,8 +191,8 @@ 0 /file_create_u\0001;F431 1 - 1380323217079 - 1380323217070 + 1381002533707 + 1381002533697 512 @@ -253,10 +253,10 @@ 0 /file_create_u\0001;F431 /file_moved - 1380323217151 + 1381002533778 NONE - ff07f00d-efa9-4b76-a064-63604cd3286e - 25 + 8e519582-234f-415a-85b8-2f3d616eb06f + 27 @@ -267,18 +267,18 @@ 16389 /file_concat_target 1 - 1380323217170 - 1380323217170 + 1381002533795 + 1381002533795 512 - DFSClient_NONMAPREDUCE_1160098410_1 + DFSClient_NONMAPREDUCE_2001868554_1 127.0.0.1 andrew supergroup 420 - ff07f00d-efa9-4b76-a064-63604cd3286e - 27 + 8e519582-234f-415a-85b8-2f3d616eb06f + 29 @@ -388,8 +388,8 @@ 0 /file_concat_target 1 - 1380323217424 - 1380323217170 + 1381002534037 + 1381002533795 512 @@ -423,18 +423,18 @@ 16390 /file_concat_0 1 - 1380323217436 - 1380323217436 + 1381002534049 + 1381002534049 512 - DFSClient_NONMAPREDUCE_1160098410_1 + DFSClient_NONMAPREDUCE_2001868554_1 127.0.0.1 andrew supergroup 420 - ff07f00d-efa9-4b76-a064-63604cd3286e - 40 + 8e519582-234f-415a-85b8-2f3d616eb06f + 42 @@ -544,8 +544,8 @@ 0 /file_concat_0 1 - 1380323217529 - 1380323217436 + 1381002534137 + 1381002534049 512 @@ -579,18 +579,18 @@ 16391 /file_concat_1 1 - 1380323217542 - 1380323217542 + 1381002534149 + 1381002534149 512 - DFSClient_NONMAPREDUCE_1160098410_1 + 
DFSClient_NONMAPREDUCE_2001868554_1 127.0.0.1 andrew supergroup 420 - ff07f00d-efa9-4b76-a064-63604cd3286e - 52 + 8e519582-234f-415a-85b8-2f3d616eb06f + 54 @@ -700,8 +700,8 @@ 0 /file_concat_1 1 - 1380323217613 - 1380323217542 + 1381002534219 + 1381002534149 512 @@ -733,13 +733,13 @@ 56 0 /file_concat_target - 1380323217627 + 1381002534232 /file_concat_0 /file_concat_1 - ff07f00d-efa9-4b76-a064-63604cd3286e - 63 + 8e519582-234f-415a-85b8-2f3d616eb06f + 65 @@ -750,15 +750,15 @@ 16392 /file_symlink /file_concat_target - 1380323217643 - 1380323217643 + 1381002534247 + 1381002534247 andrew supergroup 511 - ff07f00d-efa9-4b76-a064-63604cd3286e - 64 + 8e519582-234f-415a-85b8-2f3d616eb06f + 66 @@ -771,11 +771,11 @@ andrew JobTracker - 1380323217655 - 1380928017655 + 1381002534260 + 1381607334260 2 - 1380409617655 + 1381088934260 @@ -788,11 +788,11 @@ andrew JobTracker - 1380323217655 - 1380928017655 + 1381002534260 + 1381607334260 2 - 1380409617701 + 1381088934303 @@ -805,8 +805,8 @@ andrew JobTracker - 1380323217655 - 1380928017655 + 1381002534260 + 1381607334260 2 @@ -865,18 +865,18 @@ 16393 /hard-lease-recovery-test 1 - 1380323217822 - 1380323217822 + 1381002534420 + 1381002534420 512 - DFSClient_NONMAPREDUCE_1160098410_1 + DFSClient_NONMAPREDUCE_2001868554_1 127.0.0.1 andrew supergroup 420 - ff07f00d-efa9-4b76-a064-63604cd3286e - 73 + 8e519582-234f-415a-85b8-2f3d616eb06f + 75 @@ -932,7 +932,7 @@ OP_REASSIGN_LEASE 72 - DFSClient_NONMAPREDUCE_1160098410_1 + DFSClient_NONMAPREDUCE_2001868554_1 /hard-lease-recovery-test HDFS_NameNode @@ -961,8 +961,8 @@ 0 /hard-lease-recovery-test 1 - 1380323222701 - 1380323217822 + 1381002539323 + 1381002534420 512 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml index 0153b72d7d7..07fb44cc331 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml @@ -165,9 +165,9 @@ Testing creating cache paths -addPool pool1 - -addPath -path /foo -pool pool1 - -addPath -path /bar -pool pool1 - -listPaths -pool pool1 + -addDirective -path /foo -pool pool1 + -addDirective -path /bar -pool pool1 + -listDirectives -pool pool1 -removePool pool1 @@ -192,10 +192,10 @@ Testing removing cache paths -addPool pool1 - -addPath -path /foo -pool pool1 - -addPath -path /bar -pool pool1 + -addDirective -path /foo -pool pool1 + -addDirective -path /bar -pool pool1 -removePool pool1 - -listPaths -pool pool1 + -listDirectives -pool pool1 From b60e18db743b8933d96384942046ea57e725855d Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Mon, 7 Oct 2013 21:26:01 +0000 Subject: [PATCH 31/51] HDFS-5314. 
Do not expose CachePool type in AddCachePoolOp (Colin Patrick McCabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1530073 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 + .../hadoop/hdfs/protocol/CachePoolInfo.java | 37 ++++++- .../hdfs/server/namenode/CacheManager.java | 19 ++-- .../hdfs/server/namenode/CachePool.java | 103 ++++++++---------- .../hdfs/server/namenode/FSEditLog.java | 2 +- .../hdfs/server/namenode/FSEditLogLoader.java | 2 +- .../hdfs/server/namenode/FSEditLogOp.java | 24 ++-- .../hdfs/server/namenode/FSNamesystem.java | 4 +- 8 files changed, 107 insertions(+), 87 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index 1f915df4cc8..b391a418699 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -74,3 +74,6 @@ HDFS-4949 (Unreleased) HDFS-5266. ElasticByteBufferPool#Key does not implement equals. (cnauroth) HDFS-5309. Fix failing caching unit tests. (Andrew Wang) + + HDFS-5314. Do not expose CachePool type in AddCachePoolOp (Colin Patrick + McCabe) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java index d6894a7c044..68cd0bb245b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolInfo.java @@ -26,17 +26,31 @@ import org.apache.commons.lang.builder.EqualsBuilder; import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp; +import org.apache.hadoop.hdfs.util.XMLUtils; +import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException; +import org.apache.hadoop.hdfs.util.XMLUtils.Stanza; import org.apache.hadoop.io.Text; +import org.xml.sax.ContentHandler; +import org.xml.sax.SAXException; /** - * Information about a cache pool. + * CachePoolInfo describes a cache pool. + * + * This class is used in RPCs to create and modify cache pools. + * It is serializable and can be stored in the edit log. 
*/ @InterfaceAudience.Private @InterfaceStability.Evolving public class CachePoolInfo { + public static final Log LOG = LogFactory.getLog(CachePoolInfo.class); + final String poolName; @Nullable @@ -191,4 +205,23 @@ public void writeTo(DataOutput out) throws IOException { out.writeInt(weight); } } -} + + public void writeXmlTo(ContentHandler contentHandler) throws SAXException { + XMLUtils.addSaxString(contentHandler, "POOLNAME", poolName); + PermissionStatus perm = new PermissionStatus(ownerName, + groupName, mode); + FSEditLogOp.permissionStatusToXml(contentHandler, perm); + XMLUtils.addSaxString(contentHandler, "WEIGHT", Integer.toString(weight)); + } + + public static CachePoolInfo readXmlFrom(Stanza st) throws InvalidXmlException { + String poolName = st.getValue("POOLNAME"); + PermissionStatus perm = FSEditLogOp.permissionStatusFromXml(st); + int weight = Integer.parseInt(st.getValue("WEIGHT")); + return new CachePoolInfo(poolName). + setOwnerName(perm.getUserName()). + setGroupName(perm.getGroupName()). + setMode(perm.getPermission()). + setWeight(weight); + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index bb5e07848b0..a7d9f0698a1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -371,7 +371,7 @@ void unprotectedRemoveDescriptor(long id) throws IOException { * @param info The info for the cache pool to create. * @return the created CachePool */ - public synchronized CachePool addCachePool(CachePoolInfo info) + public synchronized CachePoolInfo addCachePool(CachePoolInfo info) throws IOException { CachePoolInfo.validate(info); String poolName = info.getPoolName(); @@ -379,11 +379,9 @@ public synchronized CachePool addCachePool(CachePoolInfo info) if (pool != null) { throw new IOException("cache pool " + poolName + " already exists."); } - CachePool cachePool = new CachePool(poolName, - info.getOwnerName(), info.getGroupName(), info.getMode(), - info.getWeight()); - unprotectedAddCachePool(cachePool); - return cachePool; + pool = CachePool.createFromInfoAndDefaults(info); + cachePools.put(pool.getPoolName(), pool); + return pool.getInfo(true); } /** @@ -392,8 +390,9 @@ public synchronized CachePool addCachePool(CachePoolInfo info) * * @param pool to be added */ - void unprotectedAddCachePool(CachePool pool) { + void unprotectedAddCachePool(CachePoolInfo info) { assert namesystem.hasWriteLock(); + CachePool pool = CachePool.createFromInfo(info); cachePools.put(pool.getPoolName(), pool); LOG.info("created new cache pool " + pool); } @@ -538,7 +537,7 @@ private synchronized void savePools(DataOutput out, Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step); out.writeInt(cachePools.size()); for (CachePool pool: cachePools.values()) { - pool.writeTo(out); + pool.getInfo(true).writeTo(out); counter.increment(); } prog.endStep(Phase.SAVING_CHECKPOINT, step); @@ -576,8 +575,8 @@ private synchronized void loadPools(DataInput in) prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfPools); Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step); for (int i = 0; i < numberOfPools; i++) { - CachePool pool = CachePool.readFrom(in); - unprotectedAddCachePool(pool); + CachePoolInfo info = 
CachePoolInfo.readFrom(in); + unprotectedAddCachePool(info); counter.increment(); } prog.endStep(Phase.LOADING_FSIMAGE, step); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java index ff580f032df..0bc5bb4c6a3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import java.io.DataInput; -import java.io.DataOutput; import java.io.IOException; import javax.annotation.Nonnull; @@ -28,15 +26,10 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; -import org.apache.hadoop.hdfs.util.XMLUtils; -import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException; -import org.apache.hadoop.hdfs.util.XMLUtils.Stanza; -import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; -import org.xml.sax.ContentHandler; -import org.xml.sax.SAXException; + +import com.google.common.base.Preconditions; /** * A CachePool describes a set of cache resources being managed by the NameNode. @@ -44,6 +37,8 @@ * * This is an internal class, only used on the NameNode. For identifying or * describing a cache pool to clients, please use CachePoolInfo. + * + * CachePools must be accessed under the FSNamesystem lock. */ @InterfaceAudience.Private public final class CachePool { @@ -73,29 +68,57 @@ public final class CachePool { private int weight; - public CachePool(String poolName, String ownerName, String groupName, - FsPermission mode, Integer weight) throws IOException { - this.poolName = poolName; + /** + * Create a new cache pool based on a CachePoolInfo object and the defaults. + * We will fill in information that was not supplied according to the + * defaults. + */ + static CachePool createFromInfoAndDefaults(CachePoolInfo info) + throws IOException { UserGroupInformation ugi = null; + String ownerName = info.getOwnerName(); if (ownerName == null) { if (ugi == null) { ugi = NameNode.getRemoteUser(); } - this.ownerName = ugi.getShortUserName(); - } else { - this.ownerName = ownerName; + ownerName = ugi.getShortUserName(); } + String groupName = info.getGroupName(); if (groupName == null) { if (ugi == null) { ugi = NameNode.getRemoteUser(); } - this.groupName = ugi.getPrimaryGroupName(); - } else { - this.groupName = groupName; + groupName = ugi.getPrimaryGroupName(); } - this.mode = mode != null ? - new FsPermission(mode): FsPermission.getCachePoolDefault(); - this.weight = weight != null ? weight : DEFAULT_WEIGHT; + FsPermission mode = (info.getMode() == null) ? + FsPermission.getCachePoolDefault() : info.getMode(); + Integer weight = (info.getWeight() == null) ? + DEFAULT_WEIGHT : info.getWeight(); + return new CachePool(info.getPoolName(), + ownerName, groupName, mode, weight); + } + + /** + * Create a new cache pool based on a CachePoolInfo object. + * No fields in the CachePoolInfo can be blank. 
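With addCachePool now taking and returning CachePoolInfo, the client-facing object is built with the fluent setters seen in readXmlFrom above, and missing fields are filled in later by CachePool.createFromInfoAndDefaults. A hedged sketch; the names and values are illustrative, and validate() is the same static check CacheManager#addCachePool performs:

    import java.io.IOException;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

    public class CachePoolInfoExample {
      static CachePoolInfo hotPool() throws IOException {
        CachePoolInfo info = new CachePoolInfo("pool1")
            .setOwnerName("andrew")
            .setGroupName("hadoop")
            .setMode(new FsPermission((short) 0755))
            .setWeight(100);
        // Same precondition check CacheManager#addCachePool applies.
        CachePoolInfo.validate(info);
        return info;
      }
    }
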
+ */ + static CachePool createFromInfo(CachePoolInfo info) { + return new CachePool(info.getPoolName(), + info.getOwnerName(), info.getGroupName(), + info.getMode(), info.getWeight()); + } + + CachePool(String poolName, String ownerName, String groupName, + FsPermission mode, int weight) { + Preconditions.checkNotNull(poolName); + Preconditions.checkNotNull(ownerName); + Preconditions.checkNotNull(groupName); + Preconditions.checkNotNull(mode); + this.poolName = poolName; + this.ownerName = ownerName; + this.groupName = groupName; + this.mode = new FsPermission(mode); + this.weight = weight; } public String getPoolName() { @@ -171,42 +194,4 @@ public String toString() { append(", weight:").append(weight). append(" }").toString(); } - - public void writeTo(DataOutput out) throws IOException { - Text.writeString(out, poolName); - PermissionStatus perm = PermissionStatus.createImmutable( - ownerName, groupName, mode); - perm.write(out); - out.writeInt(weight); - } - - public static CachePool readFrom(DataInput in) throws IOException { - String poolName = Text.readString(in); - PermissionStatus perm = PermissionStatus.read(in); - int weight = in.readInt(); - return new CachePool(poolName, perm.getUserName(), perm.getGroupName(), - perm.getPermission(), weight); - } - - public void writeXmlTo(ContentHandler contentHandler) throws SAXException { - XMLUtils.addSaxString(contentHandler, "POOLNAME", poolName); - PermissionStatus perm = new PermissionStatus(ownerName, - groupName, mode); - FSEditLogOp.permissionStatusToXml(contentHandler, perm); - XMLUtils.addSaxString(contentHandler, "WEIGHT", Integer.toString(weight)); - } - - public static CachePool readXmlFrom(Stanza st) throws InvalidXmlException { - String poolName = st.getValue("POOLNAME"); - PermissionStatus perm = FSEditLogOp.permissionStatusFromXml(st); - int weight = Integer.parseInt(st.getValue("WEIGHT")); - try { - return new CachePool(poolName, perm.getUserName(), perm.getGroupName(), - perm.getPermission(), weight); - } catch (IOException e) { - String error = "Invalid cache pool XML, missing fields."; - LOG.warn(error); - throw new InvalidXmlException(error); - } - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index 3289799fb5b..10aad74e03a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -971,7 +971,7 @@ void logRemovePathBasedCacheDescriptor(Long id, boolean toLogRpcIds) { logEdit(op); } - void logAddCachePool(CachePool pool, boolean toLogRpcIds) { + void logAddCachePool(CachePoolInfo pool, boolean toLogRpcIds) { AddCachePoolOp op = AddCachePoolOp.getInstance(cache.get()).setPool(pool); logRpcIds(op, toLogRpcIds); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 3233c1eb419..bd13ca4af79 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -664,7 +664,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, } case 
OP_ADD_CACHE_POOL: { AddCachePoolOp addOp = (AddCachePoolOp) op; - fsNamesys.getCacheManager().unprotectedAddCachePool(addOp.pool); + fsNamesys.getCacheManager().unprotectedAddCachePool(addOp.info); if (toAddRetryCache) { fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index da5a04a2094..9a9e1994982 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -2966,7 +2966,7 @@ public String toString() { } static class AddCachePoolOp extends FSEditLogOp { - CachePool pool; + CachePoolInfo info; public AddCachePoolOp() { super(OP_ADD_CACHE_POOL); @@ -2976,40 +2976,40 @@ static AddCachePoolOp getInstance(OpInstanceCache cache) { return (AddCachePoolOp) cache.get(OP_ADD_CACHE_POOL); } - public AddCachePoolOp setPool(CachePool pool) { - this.pool = pool; + public AddCachePoolOp setPool(CachePoolInfo info) { + this.info = info; return this; } @Override void readFields(DataInputStream in, int logVersion) throws IOException { - pool = CachePool.readFrom(in); + info = CachePoolInfo.readFrom(in); } @Override public void writeFields(DataOutputStream out) throws IOException { - pool.writeTo(out); + info .writeTo(out); } @Override protected void toXml(ContentHandler contentHandler) throws SAXException { - pool.writeXmlTo(contentHandler); + info .writeXmlTo(contentHandler); } @Override void fromXml(Stanza st) throws InvalidXmlException { - this.pool = CachePool.readXmlFrom(st); + this.info = CachePoolInfo.readXmlFrom(st); } @Override public String toString() { StringBuilder builder = new StringBuilder(); builder.append("AddCachePoolOp ["); - builder.append("poolName=" + pool.getPoolName() + ","); - builder.append("ownerName=" + pool.getOwnerName() + ","); - builder.append("groupName=" + pool.getGroupName() + ","); - builder.append("mode=" + Short.toString(pool.getMode().toShort()) + ","); - builder.append("weight=" + Integer.toString(pool.getWeight()) + "]"); + builder.append("poolName=" + info.getPoolName() + ","); + builder.append("ownerName=" + info.getOwnerName() + ","); + builder.append("groupName=" + info.getGroupName() + ","); + builder.append("mode=" + Short.toString(info.getMode().toShort()) + ","); + builder.append("weight=" + Integer.toString(info.getWeight()) + "]"); return builder.toString(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 6c5040989e5..fad97dbfdac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -6989,8 +6989,8 @@ public void addCachePool(CachePoolInfo req) throws IOException { if (pc != null) { pc.checkSuperuserPrivilege(); } - CachePool pool = cacheManager.addCachePool(req); - getEditLog().logAddCachePool(pool, cacheEntry != null); + CachePoolInfo info = cacheManager.addCachePool(req); + getEditLog().logAddCachePool(info, cacheEntry != null); success = true; } finally { writeUnlock(); From 54801ba4f96322fb98c52ea2184c86b061ea43c5 
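Since AddCachePoolOp now stores a CachePoolInfo directly, the bytes written into the edit log are the binary form CachePoolInfo provides (writeTo/readFrom), which the op and fsimage code above now call instead of CachePool's removed serializers. A hedged round-trip sketch using only those two methods:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

    public class CachePoolInfoRoundTrip {
      static CachePoolInfo roundTrip(CachePoolInfo in) throws IOException {
        // Serialize the same way AddCachePoolOp#writeFields does ...
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        in.writeTo(new DataOutputStream(bytes));
        // ... and read it back the way readFields / loadPools do.
        return CachePoolInfo.readFrom(
            new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
      }
    }
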
Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Tue, 8 Oct 2013 20:01:48 +0000 Subject: [PATCH 32/51] Addenum for HDFS-5309, update binary edits again git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1530410 13f79535-47bb-0310-9956-ffa450edef68 --- .../src/test/resources/editsStored | Bin 4493 -> 4497 bytes .../src/test/resources/editsStored.xml | 156 +++++++++--------- 2 files changed, 78 insertions(+), 78 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored index 2f98a18c32c04a622709aad28e3d2ae1bd2511cf..a764fb61990c05ff91d5fffe4dadf36e769fdf24 100644 GIT binary patch delta 1181 zcmeBGo~S%QPXADjYdwd*`GMQTPQOh>e{XjZWq<%?rY=TDplm0HyR7eB1_$kBo5jB# zo#?B>Gt)B1fPsP05kyZm;I}l?H8eIcG%~TYw2U`o;50Nc*E0Y@Lk0o$HI*MOS9lsX zc^_KXF|Btx&}5Donm@%Rz6YBUjbw@f%dr`Y>L+_M>W2Z%&BdyhYe8 z(=u~X<8$-NQd6KNwPMxBJy~a_*W?R~>L8~tfGS-uaiTEm+Z9r~I44UpwMt-ds({_G z^a+!9Fc~Xikrez;7(YWA>K4AqjZ8AqMp!Ku+Mn(zGTE6KIg1tSkYuO{$s{{OCfBD! zW-||4J|oaWd0}vK+SxN$w&xo)P5#Ud3gjOgc{n@5bcT4+79=wV0g1E;jiS zkGkAU%Y0|7f$AWlwI_43Ij<$q%lRP?lk!7=NlW@aR`rhWXFZ%Wc@?ih5Kw&rFo*4R z{hYuEP8RdxPbVCH_<~y)YP&T9TVh^HQEEAtSAJ4RQDSm-Y7qla2R4lpuZV8ynJmMn z;D93A#Qx%*GSoO*&Vu~>oPxxnl1fIX!El=KEilb6q;&8!HB7GHTLbh<0gw-}s(^vy z!1Zc5A+~VxEdJ|Uh;m_awSW#w&&iz%lUED$0*!C_1~=YC&_dAur$mD|)SW(ohs@Rj E0AZ7Op8x;= delta 1202 zcmbQJ+^al6PJeT(sRc)tL|(v&sJ4Jy+30jp1_)qg>SA;R$~tkZbA6}Y6trqv@Q404 z6Mc1fy0oGS7#J8GLG)x<7RAYPm?aF23=9k{%q&bzP2vq1I1P==^$dW}kU^j?aB7pX zzhhME4t?9iybbw4(>eXGt_z>|9&FMaB$E_ax~(r+O!j8f4+EOL4y#`7zms0xV1l}l zoq=0FEi)%IJ~zKCH3e$Y5v=-n4sP1?ZSn<1b&&J#L6zQ{I8m7OZH1!a?#a?jtrA$A zDk%T=UHIf3OvZ{>B!%o|S&R%=T_as}$>IFuELN~<7DJ6#OqOe8 z!w=ZkZRTOiX9W5zW-8pQcJ>UG^<8s)CVyrJ1$fLth)m2vvTV`%e57;6<_R3pU|W8| z&HB!n!D9S!^Sa5&Tp(LwSs^m9tYq0@rnJVuXcITsl2~ZS0*y-N$>5#SwdsfeG@dOc zC$Ni6{=}m$2edH)YXCdyyDjFKY|d*5a#T9hQR%?6CjB3)dMA$Ro+Xo4@hSuX)t3OX z-8!>}C5*sC&gj@5u(Ra;F>PjHsO{DaY>9a(MXBXnUinEOMTyDTsYMJx9oRI^Hx20c zH(7>H!3IUPgkgiUGSoO*&Vu~>oPxxnl1iw#P>S&_P(@0c$eLr5Yxvdx{ShYu@kg8p zS%K?)|4HD=$@BQHb0G?f$@Ky{EI~)uG$yYX=mlC)Xbrc*L(oDn@MK?;IMkg!frrf2 F0swBcU;_XE diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml index f53731bbc58..10e70326cdf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml @@ -13,8 +13,8 @@ 2 1 - 1381693732152 - 6a186e50c85a8650 + 1381946377599 + 4f37c0db7342fb35 @@ -24,8 +24,8 @@ 3 2 - 1381693732162 - ae45ee278252aab6 + 1381946377609 + 471d4ddd00402ba6 @@ -37,18 +37,18 @@ 16386 /file_create_u\0001;F431 1 - 1381002533488 - 1381002533488 + 1381255179312 + 1381255179312 512 - DFSClient_NONMAPREDUCE_2001868554_1 + DFSClient_NONMAPREDUCE_-134124999_1 127.0.0.1 andrew supergroup 420 - 8e519582-234f-415a-85b8-2f3d616eb06f - 9 + 27ac79f0-d378-4933-824b-c2a188968d97 + 8 @@ -59,8 +59,8 @@ 0 /file_create_u\0001;F431 1 - 1381002533532 - 1381002533488 + 1381255179355 + 1381255179312 512 @@ -78,9 +78,9 @@ 0 /file_create_u\0001;F431 /file_moved - 1381002533550 - 8e519582-234f-415a-85b8-2f3d616eb06f - 11 + 1381255179373 + 27ac79f0-d378-4933-824b-c2a188968d97 + 10 @@ -89,9 +89,9 @@ 7 0 /file_moved - 1381002533572 - 8e519582-234f-415a-85b8-2f3d616eb06f - 12 + 1381255179397 + 27ac79f0-d378-4933-824b-c2a188968d97 + 11 @@ -101,7 +101,7 @@ 0 16387 /directory_mkdir - 1381002533598 + 1381255179424 andrew supergroup @@ -136,8 +136,8 @@ 12 /directory_mkdir snapshot1 - 
8e519582-234f-415a-85b8-2f3d616eb06f - 17 + 27ac79f0-d378-4933-824b-c2a188968d97 + 16 @@ -147,8 +147,8 @@ /directory_mkdir snapshot1 snapshot2 - 8e519582-234f-415a-85b8-2f3d616eb06f - 18 + 27ac79f0-d378-4933-824b-c2a188968d97 + 17 @@ -157,8 +157,8 @@ 14 /directory_mkdir snapshot2 - 8e519582-234f-415a-85b8-2f3d616eb06f - 19 + 27ac79f0-d378-4933-824b-c2a188968d97 + 18 @@ -169,18 +169,18 @@ 16388 /file_create_u\0001;F431 1 - 1381002533697 - 1381002533697 + 1381255179522 + 1381255179522 512 - DFSClient_NONMAPREDUCE_2001868554_1 + DFSClient_NONMAPREDUCE_-134124999_1 127.0.0.1 andrew supergroup 420 - 8e519582-234f-415a-85b8-2f3d616eb06f - 20 + 27ac79f0-d378-4933-824b-c2a188968d97 + 19 @@ -191,8 +191,8 @@ 0 /file_create_u\0001;F431 1 - 1381002533707 - 1381002533697 + 1381255179531 + 1381255179522 512 @@ -253,10 +253,10 @@ 0 /file_create_u\0001;F431 /file_moved - 1381002533778 + 1381255179602 NONE - 8e519582-234f-415a-85b8-2f3d616eb06f - 27 + 27ac79f0-d378-4933-824b-c2a188968d97 + 26 @@ -267,18 +267,18 @@ 16389 /file_concat_target 1 - 1381002533795 - 1381002533795 + 1381255179619 + 1381255179619 512 - DFSClient_NONMAPREDUCE_2001868554_1 + DFSClient_NONMAPREDUCE_-134124999_1 127.0.0.1 andrew supergroup 420 - 8e519582-234f-415a-85b8-2f3d616eb06f - 29 + 27ac79f0-d378-4933-824b-c2a188968d97 + 28 @@ -388,8 +388,8 @@ 0 /file_concat_target 1 - 1381002534037 - 1381002533795 + 1381255179862 + 1381255179619 512 @@ -423,18 +423,18 @@ 16390 /file_concat_0 1 - 1381002534049 - 1381002534049 + 1381255179876 + 1381255179876 512 - DFSClient_NONMAPREDUCE_2001868554_1 + DFSClient_NONMAPREDUCE_-134124999_1 127.0.0.1 andrew supergroup 420 - 8e519582-234f-415a-85b8-2f3d616eb06f - 42 + 27ac79f0-d378-4933-824b-c2a188968d97 + 41 @@ -544,8 +544,8 @@ 0 /file_concat_0 1 - 1381002534137 - 1381002534049 + 1381255179957 + 1381255179876 512 @@ -579,18 +579,18 @@ 16391 /file_concat_1 1 - 1381002534149 - 1381002534149 + 1381255179967 + 1381255179967 512 - DFSClient_NONMAPREDUCE_2001868554_1 + DFSClient_NONMAPREDUCE_-134124999_1 127.0.0.1 andrew supergroup 420 - 8e519582-234f-415a-85b8-2f3d616eb06f - 54 + 27ac79f0-d378-4933-824b-c2a188968d97 + 53 @@ -700,8 +700,8 @@ 0 /file_concat_1 1 - 1381002534219 - 1381002534149 + 1381255180085 + 1381255179967 512 @@ -733,13 +733,13 @@ 56 0 /file_concat_target - 1381002534232 + 1381255180099 /file_concat_0 /file_concat_1 - 8e519582-234f-415a-85b8-2f3d616eb06f - 65 + 27ac79f0-d378-4933-824b-c2a188968d97 + 64 @@ -750,15 +750,15 @@ 16392 /file_symlink /file_concat_target - 1381002534247 - 1381002534247 + 1381255180116 + 1381255180116 andrew supergroup 511 - 8e519582-234f-415a-85b8-2f3d616eb06f - 66 + 27ac79f0-d378-4933-824b-c2a188968d97 + 65 @@ -771,11 +771,11 @@ andrew JobTracker - 1381002534260 - 1381607334260 + 1381255180128 + 1381859980128 2 - 1381088934260 + 1381341580128 @@ -788,11 +788,11 @@ andrew JobTracker - 1381002534260 - 1381607334260 + 1381255180128 + 1381859980128 2 - 1381088934303 + 1381341580177 @@ -805,8 +805,8 @@ andrew JobTracker - 1381002534260 - 1381607334260 + 1381255180128 + 1381859980128 2 @@ -865,18 +865,18 @@ 16393 /hard-lease-recovery-test 1 - 1381002534420 - 1381002534420 + 1381255180288 + 1381255180288 512 - DFSClient_NONMAPREDUCE_2001868554_1 + DFSClient_NONMAPREDUCE_-134124999_1 127.0.0.1 andrew supergroup 420 - 8e519582-234f-415a-85b8-2f3d616eb06f - 75 + 27ac79f0-d378-4933-824b-c2a188968d97 + 74 @@ -932,7 +932,7 @@ OP_REASSIGN_LEASE 72 - DFSClient_NONMAPREDUCE_2001868554_1 + DFSClient_NONMAPREDUCE_-134124999_1 /hard-lease-recovery-test HDFS_NameNode @@ -961,8 
+961,8 @@ 0 /hard-lease-recovery-test 1 - 1381002539323 - 1381002534420 + 1381255185142 + 1381255180288 512 From 3fc8792b5c75fca9fc4f6cf4b95fb2927c62e624 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Wed, 9 Oct 2013 21:30:08 +0000 Subject: [PATCH 33/51] HDFS-5304. Expose if a block replica is cached in getFileBlockLocations. (Contributed by Andrew Wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1530802 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 + .../hadoop/hdfs/protocol/LocatedBlock.java | 61 ++++++++++++++++++- .../hadoop/hdfs/protocolPB/PBHelper.java | 28 ++++++++- .../CacheReplicationManager.java | 9 +++ .../hdfs/server/namenode/FSNamesystem.java | 4 ++ .../hadoop-hdfs/src/main/proto/hdfs.proto | 1 + .../TestCacheReplicationManager.java | 14 +++++ 7 files changed, 115 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index b391a418699..54e697a3fd5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -54,6 +54,9 @@ HDFS-4949 (Unreleased) HDFS-5190. Move cache pool related CLI commands to CacheAdmin. (Contributed by Andrew Wang) + HDFS-5304. Expose if a block replica is cached in getFileBlockLocations. + (Contributed by Andrew Wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java index d9da5b845b7..ddd39d1c872 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java @@ -17,15 +17,21 @@ */ package org.apache.hadoop.hdfs.protocol; +import java.util.List; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.security.token.Token; +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; + /** * Associates a block with the Datanodes that contain its replicas * and other block metadata (E.g. the file offset associated with this - * block, whether it is corrupt, security token, etc). + * block, whether it is corrupt, a location is cached in memory, + * security token, etc). 
*/ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -39,9 +45,16 @@ public class LocatedBlock { // their locations are not part of this object private boolean corrupt; private Token blockToken = new Token(); + /** + * List of cached datanode locations + */ + private DatanodeInfo[] cachedLocs; + + // Used when there are no locations + private static final DatanodeInfo[] EMPTY_LOCS = new DatanodeInfo[0]; public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs) { - this(b, locs, -1, false); // startOffset is unknown + this(b, locs, -1); // startOffset is unknown } public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs, long startOffset) { @@ -50,14 +63,26 @@ public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs, long startOffset) { public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs, long startOffset, boolean corrupt) { + this(b, locs, startOffset, corrupt, EMPTY_LOCS); + } + + public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs, long startOffset, + boolean corrupt, DatanodeInfo[] cachedLocs) { this.b = b; this.offset = startOffset; this.corrupt = corrupt; if (locs==null) { - this.locs = new DatanodeInfo[0]; + this.locs = EMPTY_LOCS; } else { this.locs = locs; } + Preconditions.checkArgument(cachedLocs != null, + "cachedLocs should not be null, use a different constructor"); + if (cachedLocs.length == 0) { + this.cachedLocs = EMPTY_LOCS; + } else { + this.cachedLocs = cachedLocs; + } } public Token getBlockToken() { @@ -96,6 +121,36 @@ public boolean isCorrupt() { return this.corrupt; } + /** + * Add a the location of a cached replica of the block. + * + * @param loc of datanode with the cached replica + */ + public void addCachedLoc(DatanodeInfo loc) { + List cachedList = Lists.newArrayList(cachedLocs); + if (cachedList.contains(loc)) { + return; + } + // Try to re-use a DatanodeInfo already in loc + for (int i=0; i cachedLocs = + Lists.newLinkedList(Arrays.asList(b.getCachedLocations())); for (int i = 0; i < locs.length; i++) { - builder.addLocs(i, PBHelper.convert(locs[i])); + DatanodeInfo loc = locs[i]; + builder.addLocs(i, PBHelper.convert(loc)); + boolean locIsCached = cachedLocs.contains(loc); + builder.addIsCached(locIsCached); + if (locIsCached) { + cachedLocs.remove(loc); + } } + Preconditions.checkArgument(cachedLocs.size() == 0, + "Found additional cached replica locations that are not in the set of" + + " storage-backed locations!"); + return builder.setB(PBHelper.convert(b.getBlock())) .setBlockToken(PBHelper.convert(b.getBlockToken())) .setCorrupt(b.isCorrupt()).setOffset(b.getStartOffset()).build(); @@ -581,9 +594,20 @@ public static LocatedBlock convert(LocatedBlockProto proto) { for (int i = 0; i < locs.size(); i++) { targets[i] = PBHelper.convert(locs.get(i)); } + // Set values from the isCached list, re-using references from loc + List cachedLocs = new ArrayList(locs.size()); + List isCachedList = proto.getIsCachedList(); + for (int i=0; i entries = From 8111c3af6b06d6a814e235ad90af5860632d2c25 Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Fri, 11 Oct 2013 19:44:20 +0000 Subject: [PATCH 34/51] HDFS-5224. Refactor PathBasedCache* methods to use a Path rather than a String. Contributed by Chris Nauroth. 
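Before the HDFS-5224 refactor below, a hedged sketch of consuming the cached-location data that HDFS-5304 adds to LocatedBlock above; only methods visible in these hunks are used, and how the information ultimately surfaces through FileSystem#getFileBlockLocations is not shown here:

    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;

    public class CachedLocationsExample {
      static void printCachedReplicas(LocatedBlock block) {
        // getCachedLocations() returns the datanodes registered via addCachedLoc();
        // the PBHelper conversion above enforces that they remain a subset of the
        // storage-backed locations.
        for (DatanodeInfo dn : block.getCachedLocations()) {
          System.out.println(block.getBlock() + " has a cached replica on " + dn);
        }
      }
    }
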
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1531406 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 + .../hadoop/hdfs/DistributedFileSystem.java | 27 +++++- .../AddPathBasedCacheDirectiveException.java | 8 +- .../protocol/PathBasedCacheDescriptor.java | 3 +- .../protocol/PathBasedCacheDirective.java | 69 ++++++++++++--- .../hdfs/protocol/PathBasedCacheEntry.java | 4 +- ...amenodeProtocolServerSideTranslatorPB.java | 15 +++- .../ClientNamenodeProtocolTranslatorPB.java | 5 +- .../hdfs/server/namenode/CacheManager.java | 16 ++-- .../hdfs/server/namenode/FSEditLog.java | 2 +- .../hdfs/server/namenode/FSEditLogLoader.java | 7 +- .../apache/hadoop/hdfs/tools/CacheAdmin.java | 13 ++- ...amenodeProtocolServerSideTranslatorPB.java | 56 +++++++++++++ .../TestCacheReplicationManager.java | 18 ++-- .../namenode/OfflineEditsViewerHelper.java | 5 +- .../namenode/TestPathBasedCacheRequests.java | 84 +++++++++++-------- 16 files changed, 248 insertions(+), 87 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestClientNamenodeProtocolServerSideTranslatorPB.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index 54e697a3fd5..6107b64e068 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -57,6 +57,9 @@ HDFS-4949 (Unreleased) HDFS-5304. Expose if a block replica is cached in getFileBlockLocations. (Contributed by Andrew Wang) + HDFS-5224. Refactor PathBasedCache* methods to use a Path rather than a + String. (cnauroth) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 2ece7640a77..90c9ebca23f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -1591,7 +1591,12 @@ public Boolean next(final FileSystem fs, final Path p) */ public PathBasedCacheDescriptor addPathBasedCacheDirective( PathBasedCacheDirective directive) throws IOException { - return dfs.addPathBasedCacheDirective(directive); + Path path = new Path(getPathName(fixRelativePart(directive.getPath()))). + makeQualified(getUri(), getWorkingDirectory()); + return dfs.addPathBasedCacheDirective(new PathBasedCacheDirective.Builder(). + setPath(path). + setPool(directive.getPool()). + build()); } /** @@ -1614,8 +1619,24 @@ public void removePathBasedCacheDescriptor(PathBasedCacheDescriptor descriptor) * @return A RemoteIterator which returns PathBasedCacheDescriptor objects. */ public RemoteIterator listPathBasedCacheDescriptors( - String pool, String path) throws IOException { - return dfs.listPathBasedCacheDescriptors(pool, path); + String pool, final Path path) throws IOException { + String pathName = path != null ? 
getPathName(fixRelativePart(path)) : null; + final RemoteIterator iter = + dfs.listPathBasedCacheDescriptors(pool, pathName); + return new RemoteIterator() { + @Override + public boolean hasNext() throws IOException { + return iter.hasNext(); + } + + @Override + public PathBasedCacheDescriptor next() throws IOException { + PathBasedCacheDescriptor desc = iter.next(); + Path qualPath = desc.getPath().makeQualified(getUri(), path); + return new PathBasedCacheDescriptor(desc.getEntryId(), qualPath, + desc.getPool()); + } + }; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathBasedCacheDirectiveException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathBasedCacheDirectiveException.java index c077f9c90be..a59463dae95 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathBasedCacheDirectiveException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AddPathBasedCacheDirectiveException.java @@ -33,12 +33,8 @@ public static final class EmptyPathError extends AddPathBasedCacheDirectiveException { private static final long serialVersionUID = 1L; - public EmptyPathError(String msg) { - super(msg); - } - - public EmptyPathError(PathBasedCacheDirective directive) { - this("empty path in directive " + directive); + public EmptyPathError() { + super("empty path in directive"); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDescriptor.java index 2d27942c373..26c1eaa12ee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDescriptor.java @@ -19,6 +19,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.Path; import org.apache.commons.lang.builder.EqualsBuilder; import org.apache.commons.lang.builder.HashCodeBuilder; @@ -32,7 +33,7 @@ public final class PathBasedCacheDescriptor extends PathBasedCacheDirective { private final long entryId; - public PathBasedCacheDescriptor(long entryId, String path, String pool) { + public PathBasedCacheDescriptor(long entryId, Path path, String pool) { super(path, pool); Preconditions.checkArgument(entryId > 0); this.entryId = entryId; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java index 1f60616fc19..15b5bbd3960 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java @@ -25,8 +25,8 @@ import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.EmptyPathError; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError; 
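A companion sketch for the listing side shown above: the wrapping RemoteIterator hands back descriptors whose paths are qualified against the filesystem URI, and a null pool or path filter means no filtering. The filesystem handle and pool name are again assumptions; fs.defaultFS is presumed to point at the cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor;

public class ListDirectivesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

    // List everything in one pool; pass (null, null) to list all descriptors.
    RemoteIterator<PathBasedCacheDescriptor> it =
        dfs.listPathBasedCacheDescriptors("pool1", null);
    while (it.hasNext()) {
      PathBasedCacheDescriptor d = it.next();
      System.out.println(d.getEntryId() + "\t" + d.getPool() + "\t" + d.getPath());
      // The descriptor is also the handle for removal:
      // dfs.removePathBasedCacheDescriptor(d);
    }
  }
}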
import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPathNameError; @@ -36,21 +36,54 @@ @InterfaceStability.Evolving @InterfaceAudience.Public public class PathBasedCacheDirective { - private final String path; - private final String pool; + /** + * A builder for creating new PathBasedCacheDirective instances. + */ + public static class Builder { - public PathBasedCacheDirective(String path, String pool) { - Preconditions.checkNotNull(path); - Preconditions.checkNotNull(pool); - this.path = path; - this.pool = pool; + private Path path; + private String pool; + + /** + * Builds a new PathBasedCacheDirective populated with the set properties. + * + * @return New PathBasedCacheDirective. + */ + public PathBasedCacheDirective build() { + return new PathBasedCacheDirective(path, pool); + } + + /** + * Sets the path used in this request. + * + * @param path The path used in this request. + * @return This builder, for call chaining. + */ + public Builder setPath(Path path) { + this.path = path; + return this; + } + + /** + * Sets the pool used in this request. + * + * @param pool The pool used in this request. + * @return This builder, for call chaining. + */ + public Builder setPool(String pool) { + this.pool = pool; + return this; + } } + private final Path path; + private final String pool; + /** * @return The path used in this request. */ - public String getPath() { + public Path getPath() { return path; } @@ -68,10 +101,7 @@ public String getPool() { * If this PathBasedCacheDirective is not valid. */ public void validate() throws IOException { - if (path.isEmpty()) { - throw new EmptyPathError(this); - } - if (!DFSUtil.isValidName(path)) { + if (!DFSUtil.isValidName(path.toUri().getPath())) { throw new InvalidPathNameError(this); } if (pool.isEmpty()) { @@ -108,4 +138,17 @@ public String toString() { append(" }"); return builder.toString(); } + + /** + * Protected constructor. Callers use Builder to create new instances. + * + * @param path The path used in this request. + * @param pool The pool used in this request. 
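The Builder above only assembles a directive; semantic checks stay in validate(), which throws IOException subtypes such as InvalidPathNameError and InvalidPoolNameError. A short sketch of what that looks like to a caller (the empty pool name is deliberately invalid):

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;

public class ValidateDirectiveSketch {
  public static void main(String[] args) {
    PathBasedCacheDirective bad = new PathBasedCacheDirective.Builder().
        setPath(new Path("/data/set1")).
        setPool("").                  // empty pool name is rejected by validate()
        build();
    try {
      bad.validate();
    } catch (IOException e) {
      // Expected: an InvalidPoolNameError describing the offending directive.
      System.out.println("rejected: " + e);
    }
  }
}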
+ */ + protected PathBasedCacheDirective(Path path, String pool) { + Preconditions.checkNotNull(path); + Preconditions.checkNotNull(pool); + this.path = path; + this.pool = pool; + } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java index b4bd1545e3c..2c40885da3c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.protocol; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.namenode.CachePool; import com.google.common.base.Preconditions; @@ -65,6 +66,7 @@ public String toString() { } public PathBasedCacheDescriptor getDescriptor() { - return new PathBasedCacheDescriptor(entryId, path, pool.getPoolName()); + return new PathBasedCacheDescriptor(entryId, new Path(path), + pool.getPoolName()); } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 272286572a8..8c4b6441cba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -25,8 +25,10 @@ import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.Options.Rename; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.EmptyPathError; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; @@ -176,6 +178,8 @@ import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto; import org.apache.hadoop.security.token.Token; +import org.apache.commons.lang.StringUtils; + import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; @@ -1035,8 +1039,13 @@ public AddPathBasedCacheDirectiveResponseProto addPathBasedCacheDirective( throws ServiceException { try { PathBasedCacheDirectiveProto proto = request.getDirective(); - PathBasedCacheDirective directive = - new PathBasedCacheDirective(proto.getPath(), proto.getPool()); + if (StringUtils.isEmpty(proto.getPath())) { + throw new EmptyPathError(); + } + PathBasedCacheDirective directive = new PathBasedCacheDirective.Builder(). + setPath(new Path(proto.getPath())). + setPool(proto.getPool()). + build(); PathBasedCacheDescriptor descriptor = server.addPathBasedCacheDirective(directive); AddPathBasedCacheDirectiveResponseProto.Builder builder = @@ -1080,7 +1089,7 @@ public ListPathBasedCacheDescriptorsResponseProto listPathBasedCacheDescriptors( builder.addElements( ListPathBasedCacheDescriptorsElementProto.newBuilder(). setId(directive.getEntryId()). - setPath(directive.getPath()). 
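The translator change above moves the empty-path check to the RPC boundary, before a Path object is ever constructed (new Path("") itself throws IllegalArgumentException). A sketch of that guard in isolation, with the protobuf request replaced by plain string arguments:

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.EmptyPathError;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;

public class EmptyPathGuardSketch {
  /** Reject an empty path string before turning it into a Path, then build. */
  static PathBasedCacheDirective fromWire(String pathString, String pool)
      throws EmptyPathError {
    if (StringUtils.isEmpty(pathString)) {
      throw new EmptyPathError();
    }
    return new PathBasedCacheDirective.Builder().
        setPath(new Path(pathString)).
        setPool(pool).
        build();
  }

  public static void main(String[] args) throws Exception {
    System.out.println(fromWire("/tables/t1", "pool1"));
    try {
      fromWire("", "pool1");
    } catch (EmptyPathError e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}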
+ setPath(directive.getPath().toUri().getPath()). setPool(directive.getPool())); prevId = directive.getEntryId(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 6c84c0460ce..ea3dfc19ce7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.ParentNotDirectoryException; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsPermission; @@ -1009,7 +1010,7 @@ public PathBasedCacheDescriptor addPathBasedCacheDirective( AddPathBasedCacheDirectiveRequestProto.Builder builder = AddPathBasedCacheDirectiveRequestProto.newBuilder(); builder.setDirective(PathBasedCacheDirectiveProto.newBuilder() - .setPath(directive.getPath()) + .setPath(directive.getPath().toUri().getPath()) .setPool(directive.getPool()) .build()); AddPathBasedCacheDirectiveResponseProto result = @@ -1047,7 +1048,7 @@ public PathBasedCacheDescriptor get(int i) { ListPathBasedCacheDescriptorsElementProto elementProto = response.getElements(i); return new PathBasedCacheDescriptor(elementProto.getId(), - elementProto.getPath(), elementProto.getPool()); + new Path(elementProto.getPath()), elementProto.getPool()); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index a7d9f0698a1..c639bf2e492 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -36,6 +36,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError; @@ -138,7 +139,7 @@ synchronized long getNextEntryId() { private synchronized PathBasedCacheEntry findEntry(PathBasedCacheDirective directive) { List existing = - entriesByPath.get(directive.getPath()); + entriesByPath.get(directive.getPath().toUri().getPath()); if (existing == null) { return null; } @@ -246,8 +247,8 @@ PathBasedCacheDescriptor unprotectedAddDirective( CachePool pool = cachePools.get(directive.getPool()); // Add a new entry with the next available ID. 
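Much of this patch is mechanical conversion between the public Path type and the plain path strings kept in CacheManager and in the protobuf messages: makeQualified(uri, workingDir) on the way in, toUri().getPath() on the way out. A self-contained sketch of that round trip; the URI and working directory are made up for illustration and no cluster is needed.

import java.net.URI;

import org.apache.hadoop.fs.Path;

public class PathRoundTripSketch {
  public static void main(String[] args) {
    URI fsUri = URI.create("hdfs://namenode:8020");  // assumed filesystem URI
    Path workingDir = new Path("/user/alice");       // assumed working directory

    // Client side: qualify a possibly-relative Path before sending it.
    Path relative = new Path("warm/part-00000");
    Path qualified = relative.makeQualified(fsUri, workingDir);
    System.out.println(qualified);        // hdfs://namenode:8020/user/alice/warm/part-00000

    // Wire/NameNode side: keep only the path string, dropping scheme and authority.
    String pathString = qualified.toUri().getPath();
    System.out.println(pathString);       // /user/alice/warm/part-00000

    // And back again when returning results to the client.
    Path restored = new Path(pathString).makeQualified(fsUri, workingDir);
    System.out.println(restored.equals(qualified));  // true
  }
}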
PathBasedCacheEntry entry; - entry = new PathBasedCacheEntry(getNextEntryId(), directive.getPath(), - pool); + entry = new PathBasedCacheEntry(getNextEntryId(), + directive.getPath().toUri().getPath(), pool); unprotectedAddEntry(entry); @@ -303,7 +304,7 @@ void unprotectedRemoveDescriptor(long id) throws IOException { assert namesystem.hasWriteLock(); PathBasedCacheEntry existing = entriesById.get(id); // Remove the corresponding entry in entriesByPath. - String path = existing.getDescriptor().getPath(); + String path = existing.getDescriptor().getPath().toUri().getPath(); List entries = entriesByPath.get(path); if (entries == null || !entries.remove(existing)) { throw new UnexpectedRemovePathBasedCacheDescriptorException(id); @@ -315,10 +316,11 @@ void unprotectedRemoveDescriptor(long id) throws IOException { // Set the path as uncached in the namesystem try { - INode node = dir.getINode(existing.getDescriptor().getPath()); + INode node = dir.getINode(existing.getDescriptor().getPath().toUri(). + getPath()); if (node != null && node.isFile()) { - namesystem.setCacheReplicationInt(existing.getDescriptor().getPath(), - (short) 0); + namesystem.setCacheReplicationInt(existing.getDescriptor().getPath(). + toUri().getPath(), (short) 0); } } catch (IOException e) { LOG.warn("removeDescriptor " + id + ": failure while setting cache" diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index 10aad74e03a..9ae790efba1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -958,7 +958,7 @@ void logAddPathBasedCacheDirective(PathBasedCacheDirective directive, boolean toLogRpcIds) { AddPathBasedCacheDirectiveOp op = AddPathBasedCacheDirectiveOp.getInstance( cache.get()) - .setPath(directive.getPath()) + .setPath(directive.getPath().toUri().getPath()) .setPool(directive.getPool()); logRpcIds(op, toLogRpcIds); logEdit(op); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index bd13ca4af79..61cc2d0ba4f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -30,6 +30,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -641,8 +642,10 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, } case OP_ADD_PATH_BASED_CACHE_DIRECTIVE: { AddPathBasedCacheDirectiveOp addOp = (AddPathBasedCacheDirectiveOp) op; - PathBasedCacheDirective d = new PathBasedCacheDirective(addOp.path, - addOp.pool); + PathBasedCacheDirective d = new PathBasedCacheDirective.Builder(). + setPath(new Path(addOp.path)). + setPool(addOp.pool). 
+ build(); PathBasedCacheDescriptor descriptor = fsNamesys.getCacheManager().unprotectedAddDirective(d); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java index f0a71c595b2..c4633c137c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java @@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DistributedFileSystem; @@ -164,8 +165,10 @@ public int run(Configuration conf, List args) throws IOException { } DistributedFileSystem dfs = getDFS(conf); - PathBasedCacheDirective directive = - new PathBasedCacheDirective(path, poolName); + PathBasedCacheDirective directive = new PathBasedCacheDirective.Builder(). + setPath(new Path(path)). + setPool(poolName). + build(); try { PathBasedCacheDescriptor descriptor = @@ -281,12 +284,14 @@ public int run(Configuration conf, List args) throws IOException { build(); DistributedFileSystem dfs = getDFS(conf); RemoteIterator iter = - dfs.listPathBasedCacheDescriptors(poolFilter, pathFilter); + dfs.listPathBasedCacheDescriptors(poolFilter, pathFilter != null ? + new Path(pathFilter) : null); int numEntries = 0; while (iter.hasNext()) { PathBasedCacheDescriptor entry = iter.next(); String row[] = new String[] { - "" + entry.getEntryId(), entry.getPool(), entry.getPath(), + "" + entry.getEntryId(), entry.getPool(), + entry.getPath().toUri().getPath(), }; tableListing.addRow(row); numEntries++; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestClientNamenodeProtocolServerSideTranslatorPB.java new file mode 100644 index 00000000000..4cf53253c18 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestClientNamenodeProtocolServerSideTranslatorPB.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.protocolPB; + +import static org.junit.Assert.*; +import static org.mockito.Mockito.*; + +import org.junit.Test; + +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.EmptyPathError; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathBasedCacheDirectiveRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathBasedCacheDirectiveProto; + +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; + +public class TestClientNamenodeProtocolServerSideTranslatorPB { + + @Test + public void testAddPathBasedCacheDirectiveEmptyPathError() throws Exception { + ClientProtocol server = mock(ClientProtocol.class); + RpcController controller = mock(RpcController.class); + AddPathBasedCacheDirectiveRequestProto request = + AddPathBasedCacheDirectiveRequestProto.newBuilder(). + setDirective(PathBasedCacheDirectiveProto.newBuilder(). + setPath(""). + setPool("pool"). + build()). + build(); + ClientNamenodeProtocolServerSideTranslatorPB translator = + new ClientNamenodeProtocolServerSideTranslatorPB(server); + try { + translator.addPathBasedCacheDirective(controller, request); + fail("Expected ServiceException"); + } catch (ServiceException e) { + assertNotNull(e.getCause()); + assertTrue(e.getCause() instanceof EmptyPathError); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java index ee9cc96adf5..369cc376b58 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java @@ -152,12 +152,14 @@ public void testCachePaths() throws Exception { waitForExpectedNumCachedBlocks(expected); // Cache and check each path in sequence for (int i=0; i dit = dfs.listPathBasedCacheDescriptors(null, null); @@ -219,7 +223,7 @@ public void testCacheManagerRestart() throws Exception { assertTrue("Unexpected # of cache entries: " + i, dit.hasNext()); PathBasedCacheDescriptor cd = dit.next(); assertEquals(i+1, cd.getEntryId()); - assertEquals(entryPrefix + i, cd.getPath()); + assertEquals(entryPrefix + i, cd.getPath().toUri().getPath()); assertEquals(pool, cd.getPool()); } assertFalse("Unexpected # of cache descriptors found", dit.hasNext()); @@ -243,7 +247,7 @@ public void testCacheManagerRestart() throws Exception { assertTrue("Unexpected # of cache entries: " + i, dit.hasNext()); PathBasedCacheDescriptor cd = dit.next(); assertEquals(i+1, cd.getEntryId()); - assertEquals(entryPrefix + i, cd.getPath()); + assertEquals(entryPrefix + i, cd.getPath().toUri().getPath()); assertEquals(pool, cd.getPool()); } assertFalse("Unexpected # of cache descriptors found", dit.hasNext()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java index b753b8d05ff..cd08e98f258 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java @@ -243,7 +243,10 @@ public Object run() throws IOException, InterruptedException { .setWeight(1989)); // OP_ADD_PATH_BASED_CACHE_DIRECTIVE 33 PathBasedCacheDescriptor descriptor = - dfs.addPathBasedCacheDirective(new PathBasedCacheDirective("/bar", pool)); + dfs.addPathBasedCacheDirective(new PathBasedCacheDirective.Builder(). + setPath(new Path("/bar")). + setPool(pool). + build()); // OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR 34 dfs.removePathBasedCacheDescriptor(descriptor); // OP_REMOVE_CACHE_POOL 37 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java index d58343fe24d..7685c11ef78 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java @@ -31,13 +31,13 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.EmptyPathError; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPathNameError; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError; import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PoolWritePermissionDeniedError; @@ -312,12 +312,18 @@ public void testAddRemoveDirectives() throws Exception { proto.addCachePool(new CachePoolInfo("pool4"). setMode(new FsPermission((short)0))); - PathBasedCacheDirective alpha = - new PathBasedCacheDirective("/alpha", "pool1"); - PathBasedCacheDirective beta = - new PathBasedCacheDirective("/beta", "pool2"); - PathBasedCacheDirective delta = - new PathBasedCacheDirective("/delta", "pool1"); + PathBasedCacheDirective alpha = new PathBasedCacheDirective.Builder(). + setPath(new Path("/alpha")). + setPool("pool1"). + build(); + PathBasedCacheDirective beta = new PathBasedCacheDirective.Builder(). + setPath(new Path("/beta")). + setPool("pool2"). + build(); + PathBasedCacheDirective delta = new PathBasedCacheDirective.Builder(). + setPath(new Path("/delta")). + setPool("pool1"). + build(); PathBasedCacheDescriptor alphaD = addAsUnprivileged(alpha); PathBasedCacheDescriptor alphaD2 = addAsUnprivileged(alpha); @@ -326,21 +332,20 @@ public void testAddRemoveDirectives() throws Exception { PathBasedCacheDescriptor betaD = addAsUnprivileged(beta); try { - addAsUnprivileged(new PathBasedCacheDirective("", "pool3")); - fail("expected an error when adding an empty path"); - } catch (IOException ioe) { - assertTrue(ioe instanceof EmptyPathError); - } - - try { - addAsUnprivileged(new PathBasedCacheDirective("/unicorn", "no_such_pool")); + addAsUnprivileged(new PathBasedCacheDirective.Builder(). + setPath(new Path("/unicorn")). + setPool("no_such_pool"). 
+ build()); fail("expected an error when adding to a non-existent pool."); } catch (IOException ioe) { assertTrue(ioe instanceof InvalidPoolNameError); } try { - addAsUnprivileged(new PathBasedCacheDirective("/blackhole", "pool4")); + addAsUnprivileged(new PathBasedCacheDirective.Builder(). + setPath(new Path("/blackhole")). + setPool("pool4"). + build()); fail("expected an error when adding to a pool with " + "mode 0 (no permissions for anyone)."); } catch (IOException ioe) { @@ -348,43 +353,49 @@ public void testAddRemoveDirectives() throws Exception { } try { - addAsUnprivileged(new PathBasedCacheDirective("//illegal/path/", "pool1")); + addAsUnprivileged(new PathBasedCacheDirective.Builder(). + setPath(new Path("/illegal:path/")). + setPool("pool1"). + build()); fail("expected an error when adding a malformed path " + "to the cache directives."); - } catch (IOException ioe) { - assertTrue(ioe instanceof InvalidPathNameError); + } catch (IllegalArgumentException e) { + // expected } try { - addAsUnprivileged(new PathBasedCacheDirective("/emptypoolname", "")); + addAsUnprivileged(new PathBasedCacheDirective.Builder(). + setPath(new Path("/emptypoolname")). + setPool(""). + build()); Assert.fail("expected an error when adding a PathBasedCache " + "directive with an empty pool name."); } catch (IOException ioe) { Assert.assertTrue(ioe instanceof InvalidPoolNameError); } - try { - addAsUnprivileged(new PathBasedCacheDirective("bogus", "pool1")); - Assert.fail("expected an error when adding a PathBasedCache " + - "directive with a non-absolute path name."); - } catch (IOException ioe) { - Assert.assertTrue(ioe instanceof InvalidPathNameError); - } - PathBasedCacheDescriptor deltaD = addAsUnprivileged(delta); + // We expect the following to succeed, because DistributedFileSystem + // qualifies the path. + PathBasedCacheDescriptor relativeD = addAsUnprivileged( + new PathBasedCacheDirective.Builder(). + setPath(new Path("relative")). + setPool("pool1"). + build()); + RemoteIterator iter; - iter = proto.listPathBasedCacheDescriptors(0, null, null); - validateListAll(iter, alphaD, betaD, deltaD); - iter = proto.listPathBasedCacheDescriptors(0, "pool3", null); + iter = dfs.listPathBasedCacheDescriptors(null, null); + validateListAll(iter, alphaD, betaD, deltaD, relativeD); + iter = dfs.listPathBasedCacheDescriptors("pool3", null); Assert.assertFalse(iter.hasNext()); - iter = proto.listPathBasedCacheDescriptors(0, "pool1", null); - validateListAll(iter, alphaD, deltaD); - iter = proto.listPathBasedCacheDescriptors(0, "pool2", null); + iter = dfs.listPathBasedCacheDescriptors("pool1", null); + validateListAll(iter, alphaD, deltaD, relativeD); + iter = dfs.listPathBasedCacheDescriptors("pool2", null); validateListAll(iter, betaD); dfs.removePathBasedCacheDescriptor(betaD); - iter = proto.listPathBasedCacheDescriptors(0, "pool2", null); + iter = dfs.listPathBasedCacheDescriptors("pool2", null); Assert.assertFalse(iter.hasNext()); try { @@ -409,7 +420,8 @@ public void testAddRemoveDirectives() throws Exception { dfs.removePathBasedCacheDescriptor(alphaD); dfs.removePathBasedCacheDescriptor(deltaD); - iter = proto.listPathBasedCacheDescriptors(0, null, null); + dfs.removePathBasedCacheDescriptor(relativeD); + iter = dfs.listPathBasedCacheDescriptors(null, null); assertFalse(iter.hasNext()); } } From 09e9e57a0bdc4ccc963af717d71c352030e6eed9 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 11 Oct 2013 23:13:13 +0000 Subject: [PATCH 35/51] HDFS-5348. 
Fix error message when dfs.datanode.max.locked.memory is improperly configured. (Contributed by Colin Patrick McCabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1531460 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 ++ .../hadoop/hdfs/server/datanode/DataNode.java | 2 +- .../hadoop/hdfs/TestDatanodeConfig.java | 42 +++++++++++-------- 3 files changed, 29 insertions(+), 18 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index 6107b64e068..129d2345ba0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -83,3 +83,6 @@ HDFS-4949 (Unreleased) HDFS-5314. Do not expose CachePool type in AddCachePoolOp (Colin Patrick McCabe) + + HDFS-5348. Fix error message when dfs.datanode.max.locked.memory is + improperly configured. (Colin Patrick McCabe) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 71a62ab240c..226eb97d5b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -753,7 +753,7 @@ void startDataNode(Configuration conf, if (dnConf.maxLockedMemory > ulimit) { throw new RuntimeException(String.format( "Cannot start datanode because the configured max locked memory" + - " size (%s) of %d bytes is less than the datanode's available" + + " size (%s) of %d bytes is more than the datanode's available" + " RLIMIT_MEMLOCK ulimit of %d bytes.", DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, dnConf.maxLockedMemory, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java index 4bdcfee6357..8e9d013035b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java @@ -114,25 +114,33 @@ private static String makeURI(String scheme, String host, String path) public void testMemlockLimit() throws Exception { assumeTrue(NativeIO.isAvailable()); final long memlockLimit = NativeIO.getMemlockLimit(); + + // Can't increase the memlock limit past the maximum. + assumeTrue(memlockLimit != Long.MAX_VALUE); + Configuration conf = cluster.getConfiguration(0); - // Try starting the DN with limit configured to the ulimit - conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, - memlockLimit); - if (memlockLimit == Long.MAX_VALUE) { - // Can't increase the memlock limit past the maximum. - return; - } - DataNode dn = null; - dn = DataNode.createDataNode(new String[]{}, conf); - dn.shutdown(); - // Try starting the DN with a limit > ulimit - conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, - memlockLimit+1); + long prevLimit = conf. 
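The corrected wording matches the direction of the actual comparison: DataNode startup fails when the configured dfs.datanode.max.locked.memory exceeds the RLIMIT_MEMLOCK ulimit reported by NativeIO. A hedged sketch of that check in isolation; the exact guard conditions and messages in DataNode.startDataNode differ slightly, and the sketch assumes the native library loads on the host.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.io.nativeio.NativeIO;

public class MemlockCheckSketch {
  /** Roughly mirrors the startup check: the configured limit must fit under the ulimit. */
  static void checkMaxLockedMemory(Configuration conf) throws Exception {
    long maxLockedMemory = conf.getLong(
        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
        DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT);
    if (maxLockedMemory <= 0) {
      return;                                  // caching disabled, nothing to verify
    }
    if (!NativeIO.isAvailable()) {
      throw new RuntimeException("native code is required to lock memory");
    }
    long ulimit = NativeIO.getMemlockLimit();
    if (maxLockedMemory > ulimit) {
      throw new RuntimeException(String.format(
          "configured %s of %d bytes is more than the available"
              + " RLIMIT_MEMLOCK ulimit of %d bytes",
          DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
          maxLockedMemory, ulimit));
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, 64 * 1024);
    checkMaxLockedMemory(conf);
    System.out.println("configured max locked memory fits under RLIMIT_MEMLOCK");
  }
}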
+ getLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, + DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT); try { - dn = DataNode.createDataNode(new String[]{}, conf); - } catch (RuntimeException e) { - GenericTestUtils.assertExceptionContains( - "less than the datanode's available RLIMIT_MEMLOCK", e); + // Try starting the DN with limit configured to the ulimit + conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, + memlockLimit); + DataNode dn = null; + dn = DataNode.createDataNode(new String[]{}, conf); + dn.shutdown(); + // Try starting the DN with a limit > ulimit + conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, + memlockLimit+1); + try { + dn = DataNode.createDataNode(new String[]{}, conf); + } catch (RuntimeException e) { + GenericTestUtils.assertExceptionContains( + "more than the datanode's available RLIMIT_MEMLOCK", e); + } + } finally { + conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, + prevLimit); } } } From 15d08c4778350a86d7bae0174aeb48f8d8f61cce Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Mon, 14 Oct 2013 22:19:10 +0000 Subject: [PATCH 36/51] HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only (cmccabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1532116 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 1 + .../hadoop/hdfs/protocolPB/PBHelper.java | 57 +++++++++++++++---- .../blockmanagement/DatanodeManager.java | 17 ++++-- .../hdfs/server/datanode/BPOfferService.java | 11 ++-- .../datanode/fsdataset/FsDatasetSpi.java | 8 +-- .../fsdataset/impl/FsDatasetCache.java | 23 ++++---- .../fsdataset/impl/FsDatasetImpl.java | 48 +++++++++------- .../hdfs/server/protocol/BlockIdCommand.java | 50 ++++++++++++++++ .../src/main/proto/DatanodeProtocol.proto | 18 +++++- .../server/datanode/SimulatedFSDataset.java | 4 +- 10 files changed, 177 insertions(+), 60 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockIdCommand.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index 129d2345ba0..aa045dfb80c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -61,6 +61,7 @@ HDFS-4949 (Unreleased) String. (cnauroth) OPTIMIZATIONS + HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe) BUG FIXES HDFS-5169. 
hdfs.c: translateZCRException: null pointer deref when diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 2e5beaade24..4f9ce6c79aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto; +import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto; @@ -119,6 +120,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeId; import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand; import org.apache.hadoop.hdfs.server.protocol.BlockCommand; +import org.apache.hadoop.hdfs.server.protocol.BlockIdCommand; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; @@ -696,6 +698,8 @@ public static DatanodeCommand convert(DatanodeCommandProto proto) { return PBHelper.convert(proto.getKeyUpdateCmd()); case RegisterCommand: return REG_CMD; + case BlockIdCommand: + return PBHelper.convert(proto.getBlkIdCmd()); } return null; } @@ -738,12 +742,6 @@ public static BlockCommandProto convert(BlockCommand cmd) { case DatanodeProtocol.DNA_SHUTDOWN: builder.setAction(BlockCommandProto.Action.SHUTDOWN); break; - case DatanodeProtocol.DNA_CACHE: - builder.setAction(BlockCommandProto.Action.CACHE); - break; - case DatanodeProtocol.DNA_UNCACHE: - builder.setAction(BlockCommandProto.Action.UNCACHE); - break; default: throw new AssertionError("Invalid action"); } @@ -754,6 +752,26 @@ public static BlockCommandProto convert(BlockCommand cmd) { builder.addAllTargets(PBHelper.convert(cmd.getTargets())); return builder.build(); } + + public static BlockIdCommandProto convert(BlockIdCommand cmd) { + BlockIdCommandProto.Builder builder = BlockIdCommandProto.newBuilder() + .setBlockPoolId(cmd.getBlockPoolId()); + switch (cmd.getAction()) { + case DatanodeProtocol.DNA_CACHE: + builder.setAction(BlockIdCommandProto.Action.CACHE); + break; + case DatanodeProtocol.DNA_UNCACHE: + builder.setAction(BlockIdCommandProto.Action.UNCACHE); + break; + default: + throw new AssertionError("Invalid action"); + } + long[] blockIds = cmd.getBlockIds(); + for (int i = 0; i < blockIds.length; i++) { + builder.addBlockIds(blockIds[i]); + } + return builder.build(); + } private static List convert(DatanodeInfo[][] targets) { DatanodeInfosProto[] ret = new DatanodeInfosProto[targets.length]; @@ -796,11 +814,14 @@ public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) { break; case DatanodeProtocol.DNA_TRANSFER: case DatanodeProtocol.DNA_INVALIDATE: + case DatanodeProtocol.DNA_SHUTDOWN: + builder.setCmdType(DatanodeCommandProto.Type.BlockCommand). 
+ setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand)); + break; case DatanodeProtocol.DNA_CACHE: case DatanodeProtocol.DNA_UNCACHE: - case DatanodeProtocol.DNA_SHUTDOWN: - builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).setBlkCmd( - PBHelper.convert((BlockCommand) datanodeCommand)); + builder.setCmdType(DatanodeCommandProto.Type.BlockIdCommand). + setBlkIdCmd(PBHelper.convert((BlockIdCommand) datanodeCommand)); break; case DatanodeProtocol.DNA_UNKNOWN: //Not expected default: @@ -851,6 +872,20 @@ public static BlockCommand convert(BlockCommandProto blkCmd) { case SHUTDOWN: action = DatanodeProtocol.DNA_SHUTDOWN; break; + default: + throw new AssertionError("Unknown action type: " + blkCmd.getAction()); + } + return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets); + } + + public static BlockIdCommand convert(BlockIdCommandProto blkIdCmd) { + int numBlockIds = blkIdCmd.getBlockIdsCount(); + long blockIds[] = new long[numBlockIds]; + for (int i = 0; i < numBlockIds; i++) { + blockIds[i] = blkIdCmd.getBlockIds(i); + } + int action = DatanodeProtocol.DNA_UNKNOWN; + switch (blkIdCmd.getAction()) { case CACHE: action = DatanodeProtocol.DNA_CACHE; break; @@ -858,9 +893,9 @@ public static BlockCommand convert(BlockCommandProto blkCmd) { action = DatanodeProtocol.DNA_UNCACHE; break; default: - throw new AssertionError("Unknown action type: " + blkCmd.getAction()); + throw new AssertionError("Unknown action type: " + blkIdCmd.getAction()); } - return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets); + return new BlockIdCommand(action, blkIdCmd.getBlockPoolId(), blockIds); } public static DatanodeInfo[] convert(DatanodeInfosProto datanodeInfosProto) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 83959319e0f..006184a9ac2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.server.namenode.Namesystem; import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand; import org.apache.hadoop.hdfs.server.protocol.BlockCommand; +import org.apache.hadoop.hdfs.server.protocol.BlockIdCommand; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; @@ -1308,14 +1309,22 @@ public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg, // Check pending caching List pendingCacheList = nodeinfo.getCacheBlocks(); if (pendingCacheList != null) { - cmds.add(new BlockCommand(DatanodeProtocol.DNA_CACHE, blockPoolId, - pendingCacheList.toArray(new Block[] {}))); + long blockIds[] = new long[pendingCacheList.size()]; + for (int i = 0; i < pendingCacheList.size(); i++) { + blockIds[i] = pendingCacheList.get(i).getBlockId(); + } + cmds.add(new BlockIdCommand(DatanodeProtocol.DNA_CACHE, blockPoolId, + blockIds)); } // Check cached block invalidation blks = nodeinfo.getInvalidateCacheBlocks(); if (blks != null) { - cmds.add(new BlockCommand(DatanodeProtocol.DNA_UNCACHE, - blockPoolId, blks)); + long blockIds[] = new long[blks.length]; + for (int i = 0; i < 
blks.length; i++) { + blockIds[i] = blks[i].getBlockId(); + } + cmds.add(new BlockIdCommand(DatanodeProtocol.DNA_UNCACHE, + blockPoolId, blockIds)); } blockManager.addKeyUpdateCommand(cmds, nodeinfo); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java index bc78eda828a..ce934764673 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand; import org.apache.hadoop.hdfs.server.protocol.BlockCommand; +import org.apache.hadoop.hdfs.server.protocol.BlockIdCommand; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; @@ -518,6 +519,8 @@ private boolean processCommandFromActive(DatanodeCommand cmd, return true; final BlockCommand bcmd = cmd instanceof BlockCommand? (BlockCommand)cmd: null; + final BlockIdCommand blockIdCmd = + cmd instanceof BlockIdCommand ? (BlockIdCommand)cmd: null; switch(cmd.getAction()) { case DatanodeProtocol.DNA_TRANSFER: @@ -545,13 +548,13 @@ private boolean processCommandFromActive(DatanodeCommand cmd, break; case DatanodeProtocol.DNA_CACHE: LOG.info("DatanodeCommand action: DNA_CACHE"); - dn.getFSDataset().cache(bcmd.getBlockPoolId(), bcmd.getBlocks()); - dn.metrics.incrBlocksCached(bcmd.getBlocks().length); + dn.getFSDataset().cache(blockIdCmd.getBlockPoolId(), blockIdCmd.getBlockIds()); + dn.metrics.incrBlocksCached(blockIdCmd.getBlockIds().length); break; case DatanodeProtocol.DNA_UNCACHE: LOG.info("DatanodeCommand action: DNA_UNCACHE"); - dn.getFSDataset().uncache(bcmd.getBlockPoolId(), bcmd.getBlocks()); - dn.metrics.incrBlocksUncached(bcmd.getBlocks().length); + dn.getFSDataset().uncache(blockIdCmd.getBlockPoolId(), blockIdCmd.getBlockIds()); + dn.metrics.incrBlocksUncached(blockIdCmd.getBlockIds().length); break; case DatanodeProtocol.DNA_SHUTDOWN: // TODO: DNA_SHUTDOWN appears to be unused - the NN never sends this command diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java index bf93f149fdf..07f0e72aada 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java @@ -305,16 +305,16 @@ public void recoverClose(ExtendedBlock b, long newGS, long expectedBlockLen /** * Caches the specified blocks * @param bpid Block pool id - * @param cacheBlks - block to cache + * @param blockIds - block ids to cache */ - public void cache(String bpid, Block[] cacheBlks); + public void cache(String bpid, long[] blockIds); /** * Uncaches the specified blocks * @param bpid Block pool id - * @param uncacheBlks - blocks to uncache + * @param blockIds - blocks ids to uncache */ - public void uncache(String bpid, Block[] uncacheBlks); + public void uncache(String bpid, long[] 
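After this change the NameNode's cache and uncache work is described by block ids alone, so heartbeat responses no longer carry full Block objects for caching. A small sketch of how such a command is assembled and consumed, mirroring the DatanodeManager and BPOfferService code above; the block pool id and block ids are made up.

import org.apache.hadoop.hdfs.server.protocol.BlockIdCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

public class BlockIdCommandSketch {
  public static void main(String[] args) {
    String blockPoolId = "BP-1234-127.0.0.1-1381795112000";  // made-up block pool id
    long[] toCache = { 1073741825L, 1073741826L };

    // NameNode side: wrap the ids in a cache command.
    BlockIdCommand cmd =
        new BlockIdCommand(DatanodeProtocol.DNA_CACHE, blockPoolId, toCache);

    // DataNode side: act on the ids; the real code hands them to
    // FsDatasetSpi.cache(bpid, blockIds).
    if (cmd.getAction() == DatanodeProtocol.DNA_CACHE) {
      for (long blockId : cmd.getBlockIds()) {
        System.out.println("would cache block " + blockId
            + " in pool " + cmd.getBlockPoolId());
      }
    }
  }
}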
blockIds); /** * Check if all the data directories are healthy diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java index b0a3a8d77fb..4bd1cf5039c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java @@ -91,8 +91,8 @@ public FsDatasetCache(FsDatasetImpl dataset) { /** * @return if the block is cached */ - boolean isCached(String bpid, Block block) { - MappableBlock mapBlock = cachedBlocks.get(block.getBlockId()); + boolean isCached(String bpid, long blockId) { + MappableBlock mapBlock = cachedBlocks.get(blockId); if (mapBlock != null) { return mapBlock.getBlockPoolId().equals(bpid); } @@ -127,7 +127,7 @@ List getCachedBlocks(String bpid) { */ void cacheBlock(String bpid, Block block, FsVolumeImpl volume, FileInputStream blockIn, FileInputStream metaIn) { - if (isCached(bpid, block)) { + if (isCached(bpid, block.getBlockId())) { return; } MappableBlock mapBlock = null; @@ -166,23 +166,23 @@ void cacheBlock(String bpid, Block block, FsVolumeImpl volume, /** * Uncaches a block if it is cached. - * @param block to uncache + * @param blockId id to uncache */ - void uncacheBlock(String bpid, Block block) { - MappableBlock mapBlock = cachedBlocks.get(block.getBlockId()); + void uncacheBlock(String bpid, long blockId) { + MappableBlock mapBlock = cachedBlocks.get(blockId); if (mapBlock != null && mapBlock.getBlockPoolId().equals(bpid) && - mapBlock.getBlock().equals(block)) { + mapBlock.getBlock().getBlockId() == blockId) { mapBlock.close(); - cachedBlocks.remove(block.getBlockId()); + cachedBlocks.remove(blockId); long bytes = mapBlock.getNumBytes(); long used = usedBytes.get(); while (!usedBytes.compareAndSet(used, used - bytes)) { used = usedBytes.get(); } - LOG.info("Successfully uncached block " + block); + LOG.info("Successfully uncached block " + blockId); } else { - LOG.info("Could not uncache block " + block + ": unknown block."); + LOG.info("Could not uncache block " + blockId + ": unknown block."); } } @@ -215,7 +215,8 @@ public void run() { // If we failed or the block became uncacheable in the meantime, // clean up and return the reserved cache allocation if (!success || - !dataset.validToCache(block.getBlockPoolId(), block.getBlock())) { + !dataset.validToCache(block.getBlockPoolId(), + block.getBlock().getBlockId())) { block.close(); long used = usedBytes.get(); while (!usedBytes.compareAndSet(used, used-block.getNumBytes())) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index f5e0c371136..be664fd76b8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -562,7 +562,7 @@ private synchronized ReplicaBeingWritten append(String bpid, FinalizedReplica replicaInfo, long newGS, long estimateBlockLen) throws IOException { // uncache the block - cacheManager.uncacheBlock(bpid, 
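FsDatasetCache above tracks its mlocked byte count with a lock-free compare-and-set retry loop rather than a synchronized counter. A self-contained sketch of the same pattern on a plain AtomicLong; the class and method names are illustrative.

import java.util.concurrent.atomic.AtomicLong;

public class CasAccountingSketch {
  private final AtomicLong usedBytes = new AtomicLong(0);

  /** Add without locking: retry the CAS until no other thread raced us. */
  void reserve(long bytes) {
    long used = usedBytes.get();
    while (!usedBytes.compareAndSet(used, used + bytes)) {
      used = usedBytes.get();
    }
  }

  /** Subtract with the same retry loop, as the uncache path does. */
  void release(long bytes) {
    long used = usedBytes.get();
    while (!usedBytes.compareAndSet(used, used - bytes)) {
      used = usedBytes.get();
    }
  }

  public static void main(String[] args) throws InterruptedException {
    final CasAccountingSketch acct = new CasAccountingSketch();
    Thread[] workers = new Thread[4];
    for (int i = 0; i < workers.length; i++) {
      workers[i] = new Thread(new Runnable() {
        public void run() {
          for (int j = 0; j < 1000; j++) {
            acct.reserve(4096);
            acct.release(4096);
          }
        }
      });
      workers[i].start();
    }
    for (Thread t : workers) {
      t.join();
    }
    System.out.println("usedBytes after balanced reserve/release: "
        + acct.usedBytes.get());  // 0
  }
}

AtomicLong.addAndGet would behave the same way here; the explicit loop simply keeps the read-modify-write visible at the call site.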
replicaInfo); + cacheManager.uncacheBlock(bpid, replicaInfo.getBlockId()); // unlink the finalized replica replicaInfo.unlinkBlock(1); @@ -1178,7 +1178,7 @@ public void invalidate(String bpid, Block invalidBlks[]) throws IOException { } // Uncache the block synchronously - cacheManager.uncacheBlock(bpid, invalidBlks[i]); + cacheManager.uncacheBlock(bpid, invalidBlks[i].getBlockId()); // Delete the block asynchronously to make sure we can do it fast enough asyncDiskService.deleteAsync(v, f, FsDatasetUtil.getMetaFile(f, invalidBlks[i].getGenerationStamp()), @@ -1189,20 +1189,22 @@ public void invalidate(String bpid, Block invalidBlks[]) throws IOException { } } - synchronized boolean validToCache(String bpid, Block blk) { - ReplicaInfo info = volumeMap.get(bpid, blk); + synchronized boolean validToCache(String bpid, long blockId) { + ReplicaInfo info = volumeMap.get(bpid, blockId); if (info == null) { - LOG.warn("Failed to cache replica " + blk + ": ReplicaInfo not found."); + LOG.warn("Failed to cache replica in block pool " + bpid + + " with block id " + blockId + ": ReplicaInfo not found."); return false; } FsVolumeImpl volume = (FsVolumeImpl)info.getVolume(); if (volume == null) { - LOG.warn("Failed to cache replica " + blk + ": Volume not found."); + LOG.warn("Failed to cache block with id " + blockId + + ": Volume not found."); return false; } if (info.getState() != ReplicaState.FINALIZED) { - LOG.warn("Failed to cache replica " + blk + ": Replica is not" - + " finalized."); + LOG.warn("Failed to block with id " + blockId + + ": Replica is not finalized."); return false; } return true; @@ -1211,31 +1213,33 @@ synchronized boolean validToCache(String bpid, Block blk) { /** * Asynchronously attempts to cache a single block via {@link FsDatasetCache}. */ - private void cacheBlock(String bpid, Block blk) { + private void cacheBlock(String bpid, long blockId) { ReplicaInfo info; FsVolumeImpl volume; synchronized (this) { - if (!validToCache(bpid, blk)) { + if (!validToCache(bpid, blockId)) { return; } - info = volumeMap.get(bpid, blk); + info = volumeMap.get(bpid, blockId); volume = (FsVolumeImpl)info.getVolume(); } // Try to open block and meta streams FileInputStream blockIn = null; FileInputStream metaIn = null; boolean success = false; + ExtendedBlock extBlk = + new ExtendedBlock(bpid, blockId, + info.getBytesOnDisk(), info.getGenerationStamp()); try { - ExtendedBlock extBlk = new ExtendedBlock(bpid, blk); blockIn = (FileInputStream)getBlockInputStream(extBlk, 0); metaIn = (FileInputStream)getMetaDataInputStream(extBlk) .getWrappedStream(); success = true; } catch (ClassCastException e) { - LOG.warn("Failed to cache replica " + blk + ": Underlying blocks" + LOG.warn("Failed to cache replica " + extBlk + ": Underlying blocks" + " are not backed by files.", e); } catch (IOException e) { - LOG.warn("Failed to cache replica " + blk + ": IOException while" + LOG.warn("Failed to cache replica " + extBlk + ": IOException while" + " trying to open block or meta files.", e); } if (!success) { @@ -1243,21 +1247,21 @@ private void cacheBlock(String bpid, Block blk) { IOUtils.closeQuietly(metaIn); return; } - cacheManager.cacheBlock(bpid, blk, volume, blockIn, metaIn); + cacheManager.cacheBlock(bpid, extBlk.getLocalBlock(), + volume, blockIn, metaIn); } @Override // FsDatasetSpi - public void cache(String bpid, Block[] cacheBlks) { - for (int i=0; i Date: Mon, 14 Oct 2013 22:56:11 +0000 Subject: [PATCH 37/51] HDFS-5358. Add replication field to PathBasedCacheDirective. 
(Contributed by Colin Patrick McCabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1532124 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 ++ .../hadoop/hdfs/DistributedFileSystem.java | 3 +- .../protocol/PathBasedCacheDescriptor.java | 8 +++-- .../protocol/PathBasedCacheDirective.java | 34 +++++++++++++++++-- .../hdfs/protocol/PathBasedCacheEntry.java | 30 ++++++++++++++-- ...amenodeProtocolServerSideTranslatorPB.java | 3 ++ .../ClientNamenodeProtocolTranslatorPB.java | 9 +++-- .../hdfs/server/namenode/CacheManager.java | 7 ++-- .../hdfs/server/namenode/FSEditLog.java | 1 + .../hdfs/server/namenode/FSEditLogLoader.java | 1 + .../hdfs/server/namenode/FSEditLogOp.java | 13 ++++++- .../apache/hadoop/hdfs/tools/CacheAdmin.java | 10 +++++- .../ImageLoaderCurrent.java | 1 + .../offlineImageViewer/ImageVisitor.java | 1 + .../main/proto/ClientNamenodeProtocol.proto | 6 ++-- .../namenode/OfflineEditsViewerHelper.java | 1 + .../src/test/resources/editsStored.xml | 1 + 17 files changed, 115 insertions(+), 17 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index aa045dfb80c..e585bbe4539 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -60,6 +60,9 @@ HDFS-4949 (Unreleased) HDFS-5224. Refactor PathBasedCache* methods to use a Path rather than a String. (cnauroth) + HDFS-5358. Add replication field to PathBasedCacheDirective. + (Contributed by Colin Patrick McCabe) + OPTIMIZATIONS HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 90c9ebca23f..a51c31116ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -1595,6 +1595,7 @@ public PathBasedCacheDescriptor addPathBasedCacheDirective( makeQualified(getUri(), getWorkingDirectory()); return dfs.addPathBasedCacheDirective(new PathBasedCacheDirective.Builder(). setPath(path). + setReplication(directive.getReplication()). setPool(directive.getPool()). 
build()); } @@ -1634,7 +1635,7 @@ public PathBasedCacheDescriptor next() throws IOException { PathBasedCacheDescriptor desc = iter.next(); Path qualPath = desc.getPath().makeQualified(getUri(), path); return new PathBasedCacheDescriptor(desc.getEntryId(), qualPath, - desc.getPool()); + desc.getReplication(), desc.getPool()); } }; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDescriptor.java index 26c1eaa12ee..6e60a9bd5e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDescriptor.java @@ -33,8 +33,9 @@ public final class PathBasedCacheDescriptor extends PathBasedCacheDirective { private final long entryId; - public PathBasedCacheDescriptor(long entryId, Path path, String pool) { - super(path, pool); + public PathBasedCacheDescriptor(long entryId, Path path, + short replication, String pool) { + super(path, replication, pool); Preconditions.checkArgument(entryId > 0); this.entryId = entryId; } @@ -54,6 +55,7 @@ public boolean equals(Object o) { PathBasedCacheDescriptor other = (PathBasedCacheDescriptor)o; return new EqualsBuilder().append(entryId, other.entryId). append(getPath(), other.getPath()). + append(getReplication(), other.getReplication()). append(getPool(), other.getPool()). isEquals(); } @@ -62,6 +64,7 @@ public boolean equals(Object o) { public int hashCode() { return new HashCodeBuilder().append(entryId). append(getPath()). + append(getReplication()). append(getPool()). hashCode(); } @@ -71,6 +74,7 @@ public String toString() { StringBuilder builder = new StringBuilder(); builder.append("{ entryId:").append(entryId). append(", path:").append(getPath()). + append(", replication:").append(getReplication()). append(", pool:").append(getPool()). append(" }"); return builder.toString(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java index 15b5bbd3960..b25b0392938 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java @@ -41,8 +41,8 @@ public class PathBasedCacheDirective { * A builder for creating new PathBasedCacheDirective instances. */ public static class Builder { - private Path path; + private short replication = (short)1; private String pool; /** @@ -51,7 +51,7 @@ public static class Builder { * @return New PathBasedCacheDirective. */ public PathBasedCacheDirective build() { - return new PathBasedCacheDirective(path, pool); + return new PathBasedCacheDirective(path, replication, pool); } /** @@ -65,6 +65,17 @@ public Builder setPath(Path path) { return this; } + /** + * Sets the replication used in this request. + * + * @param replication The replication used in this request. + * @return This builder, for call chaining. + */ + public Builder setReplication(short replication) { + this.replication = replication; + return this; + } + /** * Sets the pool used in this request. 
* @@ -78,6 +89,7 @@ public Builder setPool(String pool) { } private final Path path; + private final short replication; private final String pool; /** @@ -87,6 +99,13 @@ public Path getPath() { return path; } + /** + * @return The number of times the block should be cached. + */ + public short getReplication() { + return replication; + } + /** * @return The pool used in this request. */ @@ -104,6 +123,10 @@ public void validate() throws IOException { if (!DFSUtil.isValidName(path.toUri().getPath())) { throw new InvalidPathNameError(this); } + if (replication <= 0) { + throw new IOException("Tried to request a cache replication " + + "factor of " + replication + ", but that is less than 1."); + } if (pool.isEmpty()) { throw new InvalidPoolNameError(this); } @@ -119,6 +142,7 @@ public boolean equals(Object o) { } PathBasedCacheDirective other = (PathBasedCacheDirective)o; return new EqualsBuilder().append(getPath(), other.getPath()). + append(getReplication(), other.getReplication()). append(getPool(), other.getPool()). isEquals(); } @@ -126,6 +150,7 @@ public boolean equals(Object o) { @Override public int hashCode() { return new HashCodeBuilder().append(getPath()). + append(replication). append(getPool()). hashCode(); } @@ -134,6 +159,7 @@ public int hashCode() { public String toString() { StringBuilder builder = new StringBuilder(); builder.append("{ path:").append(path). + append(", replication:").append(replication). append(", pool:").append(pool). append(" }"); return builder.toString(); @@ -143,12 +169,14 @@ public String toString() { * Protected constructor. Callers use Builder to create new instances. * * @param path The path used in this request. + * @param replication The replication used in this request. * @param pool The pool used in this request. 
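As a rough usage sketch of the Builder above (the path and pool name are illustrative, not taken from this patch), a client now supplies the cache replication factor alongside the path and pool; omitting setReplication keeps the Builder's default of 1, matching the old behavior:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;

    PathBasedCacheDirective directive = new PathBasedCacheDirective.Builder().
        setPath(new Path("/datasets/foo")).   // example path to cache
        setReplication((short)2).             // cache each block on two datanodes
        setPool("pool1").                     // example cache pool
        build();
    directive.validate();   // rejects a replication factor below 1 and an empty pool name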
*/ - protected PathBasedCacheDirective(Path path, String pool) { + protected PathBasedCacheDirective(Path path, short replication, String pool) { Preconditions.checkNotNull(path); Preconditions.checkNotNull(pool); this.path = path; + this.replication = replication; this.pool = pool; } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java index 2c40885da3c..f500e53000a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheEntry.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.protocol; +import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.namenode.CachePool; @@ -32,14 +33,18 @@ public final class PathBasedCacheEntry { private final long entryId; private final String path; + private final short replication; private final CachePool pool; - public PathBasedCacheEntry(long entryId, String path, CachePool pool) { + public PathBasedCacheEntry(long entryId, String path, + short replication, CachePool pool) { Preconditions.checkArgument(entryId > 0); this.entryId = entryId; - Preconditions.checkNotNull(path); + Preconditions.checkArgument(replication > 0); this.path = path; Preconditions.checkNotNull(pool); + this.replication = replication; + Preconditions.checkNotNull(path); this.pool = pool; } @@ -55,18 +60,37 @@ public CachePool getPool() { return pool; } + public short getReplication() { + return replication; + } + @Override public String toString() { StringBuilder builder = new StringBuilder(); builder.append("{ entryId:").append(entryId). append(", path:").append(path). + append(", replication:").append(replication). append(", pool:").append(pool). 
append(" }"); return builder.toString(); } public PathBasedCacheDescriptor getDescriptor() { - return new PathBasedCacheDescriptor(entryId, new Path(path), + return new PathBasedCacheDescriptor(entryId, new Path(path), replication, pool.getPoolName()); } + + @Override + public boolean equals(Object o) { + if (o.getClass() != this.getClass()) { + return false; + } + PathBasedCacheEntry other = (PathBasedCacheEntry)o; + return entryId == other.entryId; + } + + @Override + public int hashCode() { + return new HashCodeBuilder().append(entryId).toHashCode(); + } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 8c4b6441cba..5cc344370e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -180,6 +180,7 @@ import org.apache.commons.lang.StringUtils; +import com.google.common.primitives.Shorts; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; @@ -1044,6 +1045,7 @@ public AddPathBasedCacheDirectiveResponseProto addPathBasedCacheDirective( } PathBasedCacheDirective directive = new PathBasedCacheDirective.Builder(). setPath(new Path(proto.getPath())). + setReplication(Shorts.checkedCast(proto.getReplication())). setPool(proto.getPool()). build(); PathBasedCacheDescriptor descriptor = @@ -1090,6 +1092,7 @@ public ListPathBasedCacheDescriptorsResponseProto listPathBasedCacheDescriptors( ListPathBasedCacheDescriptorsElementProto.newBuilder(). setId(directive.getEntryId()). setPath(directive.getPath().toUri().getPath()). + setReplication(directive.getReplication()). 
setPool(directive.getPool())); prevId = directive.getEntryId(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index ea3dfc19ce7..e37b8af4601 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -148,6 +148,7 @@ import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; import org.apache.hadoop.security.token.Token; +import com.google.common.primitives.Shorts; import com.google.protobuf.ByteString; import com.google.protobuf.ServiceException; @@ -1011,12 +1012,14 @@ public PathBasedCacheDescriptor addPathBasedCacheDirective( AddPathBasedCacheDirectiveRequestProto.newBuilder(); builder.setDirective(PathBasedCacheDirectiveProto.newBuilder() .setPath(directive.getPath().toUri().getPath()) + .setReplication(directive.getReplication()) .setPool(directive.getPool()) .build()); AddPathBasedCacheDirectiveResponseProto result = rpcProxy.addPathBasedCacheDirective(null, builder.build()); return new PathBasedCacheDescriptor(result.getDescriptorId(), - directive.getPath(), directive.getPool()); + directive.getPath(), directive.getReplication(), + directive.getPool()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } @@ -1048,7 +1051,9 @@ public PathBasedCacheDescriptor get(int i) { ListPathBasedCacheDescriptorsElementProto elementProto = response.getElements(i); return new PathBasedCacheDescriptor(elementProto.getId(), - new Path(elementProto.getPath()), elementProto.getPool()); + new Path(elementProto.getPath()), + Shorts.checkedCast(elementProto.getReplication()), + elementProto.getPool()); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index c639bf2e492..739a98f7fc0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -248,7 +248,8 @@ PathBasedCacheDescriptor unprotectedAddDirective( // Add a new entry with the next available ID. 
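Note that the wire format carries the replication as an int while the Java API uses a short, so both protocol translators above narrow the value with Guava's Shorts.checkedCast rather than a silent cast. A minimal sketch of that guard (the literal stands in for proto.getReplication()):

    import com.google.common.primitives.Shorts;

    int wireValue = 3;                                  // stand-in for proto.getReplication()
    short replication = Shorts.checkedCast(wireValue);  // throws IllegalArgumentException if the
                                                        // value does not fit in a short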
PathBasedCacheEntry entry; entry = new PathBasedCacheEntry(getNextEntryId(), - directive.getPath().toUri().getPath(), pool); + directive.getPath().toUri().getPath(), + directive.getReplication(), pool); unprotectedAddEntry(entry); @@ -597,10 +598,12 @@ private synchronized void loadEntries(DataInput in) throws IOException { for (int i = 0; i < numberOfEntries; i++) { long entryId = in.readLong(); String path = Text.readString(in); + short replication = in.readShort(); String poolName = Text.readString(in); // Get pool reference by looking it up in the map CachePool pool = cachePools.get(poolName); - PathBasedCacheEntry entry = new PathBasedCacheEntry(entryId, path, pool); + PathBasedCacheEntry entry = + new PathBasedCacheEntry(entryId, path, replication, pool); unprotectedAddEntry(entry); counter.increment(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index 9ae790efba1..10ab5658283 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -959,6 +959,7 @@ void logAddPathBasedCacheDirective(PathBasedCacheDirective directive, AddPathBasedCacheDirectiveOp op = AddPathBasedCacheDirectiveOp.getInstance( cache.get()) .setPath(directive.getPath().toUri().getPath()) + .setReplication(directive.getReplication()) .setPool(directive.getPool()); logRpcIds(op, toLogRpcIds); logEdit(op); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 61cc2d0ba4f..09d363ce68e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -644,6 +644,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, AddPathBasedCacheDirectiveOp addOp = (AddPathBasedCacheDirectiveOp) op; PathBasedCacheDirective d = new PathBasedCacheDirective.Builder(). setPath(new Path(addOp.path)). + setReplication(addOp.replication). setPool(addOp.pool). 
build(); PathBasedCacheDescriptor descriptor = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index 9a9e1994982..e7123390ac9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -2862,8 +2862,8 @@ public String toString() { } static class AddPathBasedCacheDirectiveOp extends FSEditLogOp { - String path; + short replication; String pool; public AddPathBasedCacheDirectiveOp() { @@ -2880,6 +2880,11 @@ public AddPathBasedCacheDirectiveOp setPath(String path) { return this; } + public AddPathBasedCacheDirectiveOp setReplication(short replication) { + this.replication = replication; + return this; + } + public AddPathBasedCacheDirectiveOp setPool(String pool) { this.pool = pool; return this; @@ -2888,24 +2893,29 @@ public AddPathBasedCacheDirectiveOp setPool(String pool) { @Override void readFields(DataInputStream in, int logVersion) throws IOException { this.path = FSImageSerialization.readString(in); + this.replication = FSImageSerialization.readShort(in); this.pool = FSImageSerialization.readString(in); } @Override public void writeFields(DataOutputStream out) throws IOException { FSImageSerialization.writeString(path, out); + FSImageSerialization.writeShort(replication, out); FSImageSerialization.writeString(pool, out); } @Override protected void toXml(ContentHandler contentHandler) throws SAXException { XMLUtils.addSaxString(contentHandler, "PATH", path); + XMLUtils.addSaxString(contentHandler, "REPLICATION", + Short.toString(replication)); XMLUtils.addSaxString(contentHandler, "POOL", pool); } @Override void fromXml(Stanza st) throws InvalidXmlException { path = st.getValue("PATH"); + replication = Short.parseShort(st.getValue("REPLICATION")); pool = st.getValue("POOL"); } @@ -2914,6 +2924,7 @@ public String toString() { StringBuilder builder = new StringBuilder(); builder.append("AddPathBasedCacheDirective ["); builder.append("path=" + path + ","); + builder.append("replication=" + replication + ","); builder.append("pool=" + pool + "]"); return builder.toString(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java index c4633c137c5..ebbfba86e36 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java @@ -139,6 +139,8 @@ public String getLongUsage() { TableListing listing = getOptionDescriptionListing(); listing.addRow("", "A path to cache. The path can be " + "a directory or a file."); + listing.addRow("", "The cache replication factor to use. " + + "Defaults to 1."); listing.addRow("", "The pool to which the directive will be " + "added. 
You must have write permission on the cache pool " + "in order to add new directives."); @@ -154,6 +156,12 @@ public int run(Configuration conf, List args) throws IOException { System.err.println("You must specify a path with -path."); return 1; } + short replication = 1; + String replicationString = + StringUtils.popOptionWithArgument("-replication", args); + if (replicationString != null) { + replication = Short.parseShort(replicationString); + } String poolName = StringUtils.popOptionWithArgument("-pool", args); if (poolName == null) { System.err.println("You must specify a pool name with -pool."); @@ -167,9 +175,9 @@ public int run(Configuration conf, List args) throws IOException { DistributedFileSystem dfs = getDFS(conf); PathBasedCacheDirective directive = new PathBasedCacheDirective.Builder(). setPath(new Path(path)). + setReplication(replication). setPool(poolName). build(); - try { PathBasedCacheDescriptor descriptor = dfs.addPathBasedCacheDirective(directive); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java index 411fc16ab09..25b128de38e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java @@ -245,6 +245,7 @@ private void processCacheManagerState(DataInputStream in, ImageVisitor v) final int numEntries = in.readInt(); for (int i=0; i 63 /bar + 1 poolparty From af89caf9e4ff1b8113737d6b948bb43a42f1b715 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Tue, 15 Oct 2013 00:19:48 +0000 Subject: [PATCH 38/51] HDFS-5359. Allow LightWeightGSet#Iterator to remove elements. 
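Putting the HDFS-5358 changes above together, the end-to-end command-line flow looks roughly like the following (pool and path names are illustrative); the -replication argument is optional and defaults to 1:

    hdfs cacheadmin -addPool pool1
    hdfs cacheadmin -addDirective -path /foo -replication 2 -pool pool1
    hdfs cacheadmin -listDirectives -pool pool1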
(Contributed by Colin Patrick McCabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1532153 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/hadoop/util/LightWeightCache.java | 25 ++++ .../apache/hadoop/util/LightWeightGSet.java | 49 +++++--- .../hadoop/util/TestLightWeightGSet.java | 110 ++++++++++++++++++ .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 + 4 files changed, 173 insertions(+), 14 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLightWeightGSet.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightCache.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightCache.java index 7e7ad2c3458..a0a553af103 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightCache.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightCache.java @@ -18,6 +18,7 @@ package org.apache.hadoop.util; import java.util.Comparator; +import java.util.Iterator; import java.util.PriorityQueue; import org.apache.hadoop.HadoopIllegalArgumentException; @@ -235,4 +236,28 @@ public E remove(K key) { } return removed; } + + @Override + public Iterator iterator() { + final Iterator iter = super.iterator(); + return new Iterator() { + @Override + public boolean hasNext() { + return iter.hasNext(); + } + + @Override + public E next() { + return iter.next(); + } + + @Override + public void remove() { + // It would be tricky to support this because LightWeightCache#remove + // may evict multiple elements via evictExpiredEntries. + throw new UnsupportedOperationException("Remove via iterator is " + + "not supported for LightWeightCache"); + } + }; + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java index 768606969fc..50e291d46c7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java @@ -246,10 +246,10 @@ public void printDetails(final PrintStream out) { private class SetIterator implements Iterator { /** The starting modification for fail-fast. */ - private final int startModification = modification; + private int iterModification = modification; /** The current index of the entry array. */ private int index = -1; - /** The next element to return. */ + private LinkedElement cur = null; private LinkedElement next = nextNonemptyEntry(); /** Find the next nonempty entry starting at (index + 1). */ @@ -258,30 +258,51 @@ private LinkedElement nextNonemptyEntry() { return index < entries.length? 
entries[index]: null; } + private void ensureNext() { + if (modification != iterModification) { + throw new ConcurrentModificationException("modification=" + modification + + " != iterModification = " + iterModification); + } + if (next != null) { + return; + } + if (cur == null) { + return; + } + next = cur.getNext(); + if (next == null) { + next = nextNonemptyEntry(); + } + } + @Override public boolean hasNext() { + ensureNext(); return next != null; } @Override public E next() { - if (modification != startModification) { - throw new ConcurrentModificationException("modification=" + modification - + " != startModification = " + startModification); + ensureNext(); + if (next == null) { + throw new IllegalStateException("There are no more elements"); } - - final E e = convert(next); - - //find the next element - final LinkedElement n = next.getNext(); - next = n != null? n: nextNonemptyEntry(); - - return e; + cur = next; + next = null; + return convert(cur); } + @SuppressWarnings("unchecked") @Override public void remove() { - throw new UnsupportedOperationException("Remove is not supported."); + ensureNext(); + if (cur == null) { + throw new IllegalStateException("There is no current element " + + "to remove"); + } + LightWeightGSet.this.remove((K)cur); + iterModification++; + cur = null; } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLightWeightGSet.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLightWeightGSet.java new file mode 100644 index 00000000000..671dd37cf47 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLightWeightGSet.java @@ -0,0 +1,110 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.util; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Random; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.util.LightWeightGSet.LinkedElement; +import org.junit.Assert; +import org.junit.Test; + +/** Testing {@link LightWeightGSet} */ +public class TestLightWeightGSet { + public static final Log LOG = LogFactory.getLog(TestLightWeightGSet.class); + + private static ArrayList getRandomList(int length, int randomSeed) { + Random random = new Random(randomSeed); + ArrayList list = new ArrayList(length); + for (int i = 0; i < length; i++) { + list.add(random.nextInt()); + } + return list; + } + + private static class TestElement implements LightWeightGSet.LinkedElement { + private final int val; + private LinkedElement next; + + TestElement(int val) { + this.val = val; + this.next = null; + } + + public int getVal() { + return val; + } + + @Override + public void setNext(LinkedElement next) { + this.next = next; + } + + @Override + public LinkedElement getNext() { + return next; + } + } + + @Test(timeout=60000) + public void testRemoveAllViaIterator() { + ArrayList list = getRandomList(100, 123); + LightWeightGSet set = + new LightWeightGSet(16); + for (Integer i : list) { + set.put(new TestElement(i)); + } + for (Iterator iter = set.iterator(); + iter.hasNext(); ) { + iter.next(); + iter.remove(); + } + Assert.assertEquals(0, set.size()); + } + + @Test(timeout=60000) + public void testRemoveSomeViaIterator() { + ArrayList list = getRandomList(100, 123); + LightWeightGSet set = + new LightWeightGSet(16); + for (Integer i : list) { + set.put(new TestElement(i)); + } + long sum = 0; + for (Iterator iter = set.iterator(); + iter.hasNext(); ) { + sum += iter.next().getVal(); + } + long mode = sum / set.size(); + LOG.info("Removing all elements above " + mode); + for (Iterator iter = set.iterator(); + iter.hasNext(); ) { + int item = iter.next().getVal(); + if (item > mode) { + iter.remove(); + } + } + for (Iterator iter = set.iterator(); + iter.hasNext(); ) { + Assert.assertTrue(iter.next().getVal() <= mode); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index e585bbe4539..9ab55864cc9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -63,6 +63,9 @@ HDFS-4949 (Unreleased) HDFS-5358. Add replication field to PathBasedCacheDirective. (Contributed by Colin Patrick McCabe) + HDFS-5359. Allow LightWeightGSet#Iterator to remove elements. + (Contributed by Colin Patrick McCabe) + OPTIMIZATIONS HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe) From 2cf5084c322a0fc04bd58d2fda59660a84aa035b Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Wed, 16 Oct 2013 18:58:38 +0000 Subject: [PATCH 39/51] HDFS-5358: Add updated editsStored file for tests. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1532863 13f79535-47bb-0310-9956-ffa450edef68 --- .../src/test/resources/editsStored | Bin 4497 -> 4499 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored index a764fb61990c05ff91d5fffe4dadf36e769fdf24..ffd6601a9b115ce81f4ded2ff9981baa83ec0482 100644 GIT binary patch delta 29 lcmbQJJXv|eZax7<2F`-~{G5WsqLNDgE18EjpX58i1OS~@3yJ^$ delta 27 jcmbQNJW+YWZazNFg8clPg2bYd%93BPA~xgkMRm{ From 8da82eba1c84f828617a13a6f785a9b6cfc057a5 Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Wed, 16 Oct 2013 20:23:14 +0000 Subject: [PATCH 40/51] HDFS-5373. hdfs cacheadmin -addDirective short usage does not mention -replication parameter. Contributed by Chris Nauroth. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1532888 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 +++ .../main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java | 3 ++- .../hadoop-hdfs/src/test/resources/testCacheAdminConf.xml | 7 ++++++- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index 9ab55864cc9..cc73edf9164 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -93,3 +93,6 @@ HDFS-4949 (Unreleased) HDFS-5348. Fix error message when dfs.datanode.max.locked.memory is improperly configured. (Colin Patrick McCabe) + + HDFS-5373. hdfs cacheadmin -addDirective short usage does not mention + -replication parameter. (cnauroth) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java index ebbfba86e36..0aa93169d80 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java @@ -131,7 +131,8 @@ public String getName() { @Override public String getShortUsage() { - return "[" + getName() + " -path -pool ]\n"; + return "[" + getName() + + " -path -replication -pool ]\n"; } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml index 07fb44cc331..2e7506dcbef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml @@ -167,6 +167,7 @@ -addPool pool1 -addDirective -path /foo -pool pool1 -addDirective -path /bar -pool pool1 + -addDirective -path /baz -replication 2 -pool pool1 -listDirectives -pool pool1 @@ -175,7 +176,7 @@ SubstringComparator - Found 2 entries + Found 3 entries SubstringComparator @@ -185,6 +186,10 @@ SubstringComparator 2 pool1 /bar + + SubstringComparator + 3 pool1 /baz + From 3cc7a38a53c8ae27ef6b2397cddc5d14a378203a Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Wed, 16 Oct 2013 22:15:33 +0000 Subject: [PATCH 41/51] HDFS-5096. 
Automatically cache new data added to a cached path (contributed by Colin Patrick McCabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1532924 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop/util/IntrusiveCollection.java | 373 +++++++++++ .../hadoop-hdfs/CHANGES-HDFS-4949.txt | 3 + .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 3 + .../blockmanagement/BlockCollection.java | 11 - .../server/blockmanagement/BlockInfo.java | 4 +- .../server/blockmanagement/BlockManager.java | 206 +++++- .../CacheReplicationManager.java | 607 ----------------- .../CacheReplicationMonitor.java | 617 +++++++++++------- .../CacheReplicationPolicy.java | 129 ---- .../blockmanagement/DatanodeDescriptor.java | 193 ++---- .../blockmanagement/DatanodeManager.java | 76 ++- .../blockmanagement/InvalidateBlocks.java | 75 ++- .../InvalidateStoredBlocks.java | 67 -- .../PendingReplicationBlocks.java | 29 +- .../blockmanagement/ReportProcessor.java | 271 -------- .../server/blockmanagement/UncacheBlocks.java | 44 -- .../hdfs/server/datanode/BPOfferService.java | 2 + .../fsdataset/impl/FsDatasetCache.java | 18 - .../hdfs/server/namenode/CacheManager.java | 515 ++++++++++----- .../hdfs/server/namenode/FSDirectory.java | 66 +- .../hdfs/server/namenode/FSEditLogLoader.java | 11 +- .../hdfs/server/namenode/FSNamesystem.java | 71 +- .../hdfs/server/namenode/INodeFile.java | 14 - .../hadoop/hdfs/server/namenode/NameNode.java | 9 +- .../server/namenode/NameNodeRpcServer.java | 4 +- .../src/main/resources/hdfs-default.xml | 12 + .../TestCacheReplicationManager.java | 256 -------- .../blockmanagement/TestCachedBlocksList.java | 151 +++++ .../namenode/TestPathBasedCacheRequests.java | 322 ++++++++- 29 files changed, 1976 insertions(+), 2183 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IntrusiveCollection.java delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationManager.java delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationPolicy.java delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateStoredBlocks.java delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReportProcessor.java delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UncacheBlocks.java delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCachedBlocksList.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IntrusiveCollection.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IntrusiveCollection.java new file mode 100644 index 00000000000..0512d4aa5d1 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IntrusiveCollection.java @@ -0,0 +1,373 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.util; + +import java.util.Collection; +import java.util.Iterator; +import java.util.NoSuchElementException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; + +import com.google.common.base.Preconditions; + +/** + * Implements an intrusive doubly-linked list. + * + * An intrusive linked list is one in which the elements themselves are + * responsible for storing the pointers to previous and next elements. + * This can save a lot of memory if there are many elements in the list or + * many lists. + */ +@InterfaceAudience.Private +public class IntrusiveCollection + implements Collection { + /** + * An element contained in this list. + * + * We pass the list itself as a parameter so that elements can belong to + * multiple lists. (The element will need to store separate prev and next + * pointers for each.) + */ + @InterfaceAudience.Private + public interface Element { + /** + * Insert this element into the list. This is the first thing that will + * be called on the element. + */ + void insertInternal(IntrusiveCollection list, + Element prev, Element next); + + /** + * Set the prev pointer of an element already in the list. + */ + void setPrev(IntrusiveCollection list, Element prev); + + /** + * Set the next pointer of an element already in the list. + */ + void setNext(IntrusiveCollection list, Element next); + + /** + * Remove an element from the list. This is the last thing that will be + * called on an element. + */ + void removeInternal(IntrusiveCollection list); + + /** + * Get the prev pointer of an element. + */ + Element getPrev(IntrusiveCollection list); + + /** + * Get the next pointer of an element. + */ + Element getNext(IntrusiveCollection list); + + /** + * Returns true if this element is in the provided list. + */ + boolean isInList(IntrusiveCollection list); + } + + private Element root = new Element() { + // We keep references to the first and last elements for easy access. 
+ Element first = this; + Element last = this; + + @Override + public void insertInternal(IntrusiveCollection list, + Element prev, Element next) { + throw new RuntimeException("Can't insert root element"); + } + + @Override + public void setPrev(IntrusiveCollection list, + Element prev) { + Preconditions.checkState(list == IntrusiveCollection.this); + last = prev; + } + + @Override + public void setNext(IntrusiveCollection list, + Element next) { + Preconditions.checkState(list == IntrusiveCollection.this); + first = next; + } + + @Override + public void removeInternal(IntrusiveCollection list) { + throw new RuntimeException("Can't remove root element"); + } + + @Override + public Element getNext( + IntrusiveCollection list) { + Preconditions.checkState(list == IntrusiveCollection.this); + return first; + } + + @Override + public Element getPrev( + IntrusiveCollection list) { + Preconditions.checkState(list == IntrusiveCollection.this); + return last; + } + + @Override + public boolean isInList(IntrusiveCollection list) { + return list == IntrusiveCollection.this; + } + + @Override + public String toString() { + return "root"; // + IntrusiveCollection.this + "]"; + } + }; + + private int size = 0; + + /** + * An iterator over the intrusive collection. + * + * Currently, you can remove elements from the list using + * #{IntrusiveIterator#remove()}, but modifying the collection in other + * ways during the iteration is not supported. + */ + public class IntrusiveIterator implements Iterator { + Element cur; + Element next; + + IntrusiveIterator() { + this.cur = root; + this.next = null; + } + + @Override + public boolean hasNext() { + if (next == null) { + next = cur.getNext(IntrusiveCollection.this); + } + return next != root; + } + + @SuppressWarnings("unchecked") + @Override + public E next() { + if (next == null) { + next = cur.getNext(IntrusiveCollection.this); + } + if (next == root) { + throw new NoSuchElementException(); + } + cur = next; + next = null; + return (E)cur; + } + + @Override + public void remove() { + if (cur == null) { + throw new IllegalStateException("Already called remove " + + "once on this element."); + } + next = removeElement(cur); + cur = null; + } + } + + private Element removeElement(Element elem) { + Element prev = elem.getPrev(IntrusiveCollection.this); + Element next = elem.getNext(IntrusiveCollection.this); + elem.removeInternal(IntrusiveCollection.this); + prev.setNext(IntrusiveCollection.this, next); + next.setPrev(IntrusiveCollection.this, prev); + size--; + return next; + } + + /** + * Get an iterator over the list. This can be used to remove elements. + * It is not safe to do concurrent modifications from other threads while + * using this iterator. + * + * @return The iterator. 
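To make the Element contract above concrete, here is a minimal sketch (not part of the patch; the class name is hypothetical) of an element that can belong to a single IntrusiveCollection by storing its own prev/next pointers:

    import org.apache.hadoop.util.IntrusiveCollection;
    import org.apache.hadoop.util.IntrusiveCollection.Element;

    // Hypothetical element type for illustration; an element that can join several
    // lists would keep one prev/next pair (and one list reference) per list.
    class SampleElement implements Element {
      private IntrusiveCollection list;   // the single list this element may join
      private Element prev;
      private Element next;

      @Override
      public void insertInternal(IntrusiveCollection list, Element prev, Element next) {
        this.list = list;
        this.prev = prev;
        this.next = next;
      }

      @Override
      public void setPrev(IntrusiveCollection list, Element prev) { this.prev = prev; }

      @Override
      public void setNext(IntrusiveCollection list, Element next) { this.next = next; }

      @Override
      public void removeInternal(IntrusiveCollection list) {
        this.list = null;
        this.prev = null;
        this.next = null;
      }

      @Override
      public Element getPrev(IntrusiveCollection list) { return prev; }

      @Override
      public Element getNext(IntrusiveCollection list) { return next; }

      @Override
      public boolean isInList(IntrusiveCollection list) { return list == this.list; }
    }

    // e.g. inside a test: add and remove are O(1) and allocate no per-entry wrappers.
    IntrusiveCollection pending = new IntrusiveCollection();
    SampleElement e = new SampleElement();
    pending.add(e);
    pending.remove(e);

The point of the intrusive layout is that membership tests and removal need no auxiliary node objects, which matters when the same blocks appear on many per-datanode lists.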
+ */ + public Iterator iterator() { + return new IntrusiveIterator(); + } + + @Override + public int size() { + return size; + } + + @Override + public boolean isEmpty() { + return size == 0; + } + + @Override + public boolean contains(Object o) { + try { + Element element = (Element)o; + return element.isInList(this); + } catch (ClassCastException e) { + return false; + } + } + + @Override + public Object[] toArray() { + Object ret[] = new Object[size]; + int i = 0; + for (Iterator iter = iterator(); iter.hasNext(); ) { + ret[i++] = iter.next(); + } + return ret; + } + + @SuppressWarnings("unchecked") + @Override + public T[] toArray(T[] array) { + if (array.length < size) { + return (T[])toArray(); + } else { + int i = 0; + for (Iterator iter = iterator(); iter.hasNext(); ) { + array[i++] = (T)iter.next(); + } + } + return array; + } + + /** + * Add an element to the end of the list. + * + * @param elem The new element to add. + */ + @Override + public boolean add(E elem) { + if (elem == null) { + return false; + } + if (elem.isInList(this)) { + return false; + } + Element prev = root.getPrev(IntrusiveCollection.this); + prev.setNext(IntrusiveCollection.this, elem); + root.setPrev(IntrusiveCollection.this, elem); + elem.insertInternal(IntrusiveCollection.this, prev, root); + size++; + return true; + } + + /** + * Add an element to the front of the list. + * + * @param elem The new element to add. + */ + public boolean addFirst(Element elem) { + if (elem == null) { + return false; + } + if (elem.isInList(this)) { + return false; + } + Element next = root.getNext(IntrusiveCollection.this); + next.setPrev(IntrusiveCollection.this, elem); + root.setNext(IntrusiveCollection.this, elem); + elem.insertInternal(IntrusiveCollection.this, root, next); + size++; + return true; + } + + public static final Log LOG = LogFactory.getLog(IntrusiveCollection.class); + + @Override + public boolean remove(Object o) { + try { + Element elem = (Element)o; + if (!elem.isInList(this)) { + return false; + } + removeElement(elem); + return true; + } catch (ClassCastException e) { + return false; + } + } + + @Override + public boolean containsAll(Collection collection) { + for (Object o : collection) { + if (!contains(o)) { + return false; + } + } + return true; + } + + @Override + public boolean addAll(Collection collection) { + boolean changed = false; + for (E elem : collection) { + if (add(elem)) { + changed = true; + } + } + return changed; + } + + @Override + public boolean removeAll(Collection collection) { + boolean changed = false; + for (Object elem : collection) { + if (remove(elem)) { + changed = true; + } + } + return changed; + } + + @Override + public boolean retainAll(Collection collection) { + boolean changed = false; + for (Iterator iter = iterator(); + iter.hasNext(); ) { + Element elem = iter.next(); + if (!collection.contains(elem)) { + iter.remove(); + changed = true; + } + } + return changed; + } + + /** + * Remove all elements. + */ + @Override + public void clear() { + for (Iterator iter = iterator(); iter.hasNext(); ) { + iter.next(); + iter.remove(); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt index cc73edf9164..863a8a1a304 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt @@ -66,6 +66,9 @@ HDFS-4949 (Unreleased) HDFS-5359. Allow LightWeightGSet#Iterator to remove elements. (Contributed by Colin Patrick McCabe) + HDFS-5096. 
Automatically cache new data added to a cached path. + (Contributed by Colin Patrick McCabe) + OPTIMIZATIONS HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 841a8955c4f..5d2b14c50e9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -205,6 +205,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES = "dfs.namenode.list.cache.descriptors.num.responses"; public static final int DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES_DEFAULT = 100; + public static final String DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS = + "dfs.namenode.path.based.cache.refresh.interval.ms"; + public static final long DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT = 300000L; // Whether to enable datanode's stale state detection and usage for reads public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY = "dfs.namenode.avoid.read.stale.datanode"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java index ba970e07f28..f344833a0c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java @@ -60,17 +60,6 @@ public interface BlockCollection { */ public short getBlockReplication(); - /** - * Set cache replication factor for the collection - */ - public void setCacheReplication(short cacheReplication); - - /** - * Get cache replication factor for the collection - * @return cache replication value - */ - public short getCacheReplication(); - /** * Get the name of the collection. 
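Relating this back to the new DFSConfigKeys entries above: the interval at which path-based cache directives are rescanned is now configurable through dfs.namenode.path.based.cache.refresh.interval.ms (default 300000 ms). A hedged sketch of lowering it for a test cluster, where the 30-second value is only an example:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    Configuration conf = new HdfsConfiguration();
    // Rescan path-based cache directives every 30 seconds instead of the
    // five-minute default, so newly added data is cached sooner in tests.
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS,
        30000L);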
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java index 8307180cedd..7376f00b5d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java @@ -85,7 +85,7 @@ public void setBlockCollection(BlockCollection bc) { this.bc = bc; } - DatanodeDescriptor getDatanode(int index) { + public DatanodeDescriptor getDatanode(int index) { assert this.triplets != null : "BlockInfo is not initialized"; assert index >= 0 && index*3 < triplets.length : "Index is out of bound"; return (DatanodeDescriptor)triplets[index*3]; @@ -153,7 +153,7 @@ private BlockInfo setNext(int index, BlockInfo to) { return info; } - int getCapacity() { + public int getCapacity() { assert this.triplets != null : "BlockInfo is not initialized"; assert triplets.length % 3 == 0 : "Malformed BlockInfo"; return triplets.length / 3; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index ac121a109c5..53699fb2aad 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -77,13 +77,14 @@ import org.apache.hadoop.util.Time; import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; import com.google.common.collect.Sets; /** * Keeps information related to the blocks stored in the Hadoop cluster. */ @InterfaceAudience.Private -public class BlockManager extends ReportProcessor { +public class BlockManager { static final Log LOG = LogFactory.getLog(BlockManager.class); public static final Log blockLog = NameNode.blockStateChangeLog; @@ -162,7 +163,7 @@ public int getPendingDataNodeMessageCount() { final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap(); /** Blocks to be invalidated. */ - private final InvalidateStoredBlocks invalidateBlocks; + private final InvalidateBlocks invalidateBlocks; /** * After a failover, over-replicated blocks may not be handled @@ -218,6 +219,7 @@ public int getPendingDataNodeMessageCount() { final boolean encryptDataTransfer; // Max number of blocks to log info about during a block report. 
+ private final long maxNumBlocksToLog; /** * When running inside a Standby node, the node may receive block reports @@ -235,11 +237,10 @@ public int getPendingDataNodeMessageCount() { public BlockManager(final Namesystem namesystem, final FSClusterStats stats, final Configuration conf) throws IOException { - super(conf); this.namesystem = namesystem; datanodeManager = new DatanodeManager(this, namesystem, conf); heartbeatManager = datanodeManager.getHeartbeatManager(); - invalidateBlocks = new InvalidateStoredBlocks(datanodeManager); + invalidateBlocks = new InvalidateBlocks(datanodeManager); // Compute the map capacity by allocating 2% of total memory blocksMap = new BlocksMap(DEFAULT_MAP_LOAD_FACTOR); @@ -299,7 +300,11 @@ public BlockManager(final Namesystem namesystem, final FSClusterStats stats, this.encryptDataTransfer = conf.getBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT); - + + this.maxNumBlocksToLog = + conf.getLong(DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY, + DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT); + LOG.info("defaultReplication = " + defaultReplication); LOG.info("maxReplication = " + maxReplication); LOG.info("minReplication = " + minReplication); @@ -999,7 +1004,6 @@ void removeBlocksAssociatedTo(final DatanodeDescriptor node) { * Adds block to list of blocks which will be invalidated on specified * datanode and log the operation */ - @Override // ReportProcessor void addToInvalidates(final Block block, final DatanodeInfo datanode) { invalidateBlocks.add(block, datanode, true); } @@ -1045,8 +1049,7 @@ public void findAndMarkBlockAsCorrupt(final ExtendedBlock blk, markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock, reason), dn); } - @Override // ReportProcessor - void markBlockAsCorrupt(BlockToMarkCorrupt b, + private void markBlockAsCorrupt(BlockToMarkCorrupt b, DatanodeInfo dn) throws IOException { DatanodeDescriptor node = getDatanodeManager().getDatanode(dn); if (node == null) { @@ -1056,7 +1059,7 @@ void markBlockAsCorrupt(BlockToMarkCorrupt b, BlockCollection bc = b.corrupted.getBlockCollection(); if (bc == null) { - blockLogInfo("#markBlockAsCorrupt: " + b + blockLog.info("BLOCK markBlockAsCorrupt: " + b + " cannot be marked as corrupt as it does not belong to any file"); addToInvalidates(b.corrupted, node); return; @@ -1120,9 +1123,6 @@ public void setPostponeBlocksFromFuture(boolean postpone) { this.shouldPostponeBlocksFromFuture = postpone; } - public boolean shouldPostponeBlocksFromFuture() { - return this.shouldPostponeBlocksFromFuture; - } private void postponeBlock(Block blk) { if (postponedMisreplicatedBlocks.add(blk)) { @@ -1544,6 +1544,61 @@ private void processPendingReplications() { */ } } + + /** + * StatefulBlockInfo is used to build the "toUC" list, which is a list of + * updates to the information about under-construction blocks. + * Besides the block in question, it provides the ReplicaState + * reported by the datanode in the block report. + */ + private static class StatefulBlockInfo { + final BlockInfoUnderConstruction storedBlock; + final ReplicaState reportedState; + + StatefulBlockInfo(BlockInfoUnderConstruction storedBlock, + ReplicaState reportedState) { + this.storedBlock = storedBlock; + this.reportedState = reportedState; + } + } + + /** + * BlockToMarkCorrupt is used to build the "toCorrupt" list, which is a + * list of blocks that should be considered corrupt due to a block report. + */ + private static class BlockToMarkCorrupt { + /** The corrupted block in a datanode. 
*/ + final BlockInfo corrupted; + /** The corresponding block stored in the BlockManager. */ + final BlockInfo stored; + /** The reason to mark corrupt. */ + final String reason; + + BlockToMarkCorrupt(BlockInfo corrupted, BlockInfo stored, String reason) { + Preconditions.checkNotNull(corrupted, "corrupted is null"); + Preconditions.checkNotNull(stored, "stored is null"); + + this.corrupted = corrupted; + this.stored = stored; + this.reason = reason; + } + + BlockToMarkCorrupt(BlockInfo stored, String reason) { + this(stored, stored, reason); + } + + BlockToMarkCorrupt(BlockInfo stored, long gs, String reason) { + this(new BlockInfo(stored), stored, reason); + //the corrupted block in datanode has a different generation stamp + corrupted.setGenerationStamp(gs); + } + + @Override + public String toString() { + return corrupted + "(" + + (corrupted == stored? "same as stored": "stored=" + stored) + ")"; + } + } /** * The given datanode is reporting all its blocks. @@ -1635,6 +1690,46 @@ private void rescanPostponedMisreplicatedBlocks() { } } + private void processReport(final DatanodeDescriptor node, + final BlockListAsLongs report) throws IOException { + // Normal case: + // Modify the (block-->datanode) map, according to the difference + // between the old and new block report. + // + Collection toAdd = new LinkedList(); + Collection toRemove = new LinkedList(); + Collection toInvalidate = new LinkedList(); + Collection toCorrupt = new LinkedList(); + Collection toUC = new LinkedList(); + reportDiff(node, report, toAdd, toRemove, toInvalidate, toCorrupt, toUC); + + // Process the blocks on each queue + for (StatefulBlockInfo b : toUC) { + addStoredBlockUnderConstruction(b.storedBlock, node, b.reportedState); + } + for (Block b : toRemove) { + removeStoredBlock(b, node); + } + int numBlocksLogged = 0; + for (BlockInfo b : toAdd) { + addStoredBlock(b, node, null, numBlocksLogged < maxNumBlocksToLog); + numBlocksLogged++; + } + if (numBlocksLogged > maxNumBlocksToLog) { + blockLog.info("BLOCK* processReport: logged info for " + maxNumBlocksToLog + + " of " + numBlocksLogged + " reported."); + } + for (Block b : toInvalidate) { + blockLog.info("BLOCK* processReport: " + + b + " on " + node + " size " + b.getNumBytes() + + " does not belong to any file"); + addToInvalidates(b, node); + } + for (BlockToMarkCorrupt b : toCorrupt) { + markBlockAsCorrupt(b, node); + } + } + /** * processFirstBlockReport is intended only for processing "initial" block * reports, the first block report received from a DN after it registers. 
@@ -1697,6 +1792,44 @@ private void processFirstBlockReport(final DatanodeDescriptor node, } } + private void reportDiff(DatanodeDescriptor dn, + BlockListAsLongs newReport, + Collection toAdd, // add to DatanodeDescriptor + Collection toRemove, // remove from DatanodeDescriptor + Collection toInvalidate, // should be removed from DN + Collection toCorrupt, // add to corrupt replicas list + Collection toUC) { // add to under-construction list + // place a delimiter in the list which separates blocks + // that have been reported from those that have not + BlockInfo delimiter = new BlockInfo(new Block(), 1); + boolean added = dn.addBlock(delimiter); + assert added : "Delimiting block cannot be present in the node"; + int headIndex = 0; //currently the delimiter is in the head of the list + int curIndex; + + if (newReport == null) + newReport = new BlockListAsLongs(); + // scan the report and process newly reported blocks + BlockReportIterator itBR = newReport.getBlockReportIterator(); + while(itBR.hasNext()) { + Block iblk = itBR.next(); + ReplicaState iState = itBR.getCurrentReplicaState(); + BlockInfo storedBlock = processReportedBlock(dn, iblk, iState, + toAdd, toInvalidate, toCorrupt, toUC); + // move block to the head of the list + if (storedBlock != null && (curIndex = storedBlock.findDatanode(dn)) >= 0) { + headIndex = dn.moveBlockToHead(storedBlock, curIndex, headIndex); + } + } + // collect blocks that have not been reported + // all of them are next to the delimiter + Iterator it = new DatanodeDescriptor.BlockIterator( + delimiter.getNext(0), dn); + while(it.hasNext()) + toRemove.add(it.next()); + dn.removeBlock(delimiter); + } + /** * Process a block replica reported by the data-node. * No side effects except adding to the passed-in Collections. @@ -1728,8 +1861,7 @@ private void processFirstBlockReport(final DatanodeDescriptor node, * @return the up-to-date stored block, if it should be kept. * Otherwise, null. */ - @Override // ReportProcessor - BlockInfo processReportedBlock(final DatanodeDescriptor dn, + private BlockInfo processReportedBlock(final DatanodeDescriptor dn, final Block block, final ReplicaState reportedState, final Collection toAdd, final Collection toInvalidate, @@ -1956,7 +2088,6 @@ private boolean isBlockUnderConstruction(BlockInfo storedBlock, } } - @Override // ReportProcessor void addStoredBlockUnderConstruction( BlockInfoUnderConstruction block, DatanodeDescriptor node, @@ -2012,8 +2143,7 @@ private void addStoredBlockImmediate(BlockInfo storedBlock, * needed replications if this takes care of the problem. * @return the block that is stored in blockMap. */ - @Override // ReportProcessor - Block addStoredBlock(final BlockInfo block, + private Block addStoredBlock(final BlockInfo block, DatanodeDescriptor node, DatanodeDescriptor delNodeHint, boolean logEveryBlock) @@ -2028,7 +2158,7 @@ Block addStoredBlock(final BlockInfo block, } if (storedBlock == null || storedBlock.getBlockCollection() == null) { // If this block does not belong to anyfile, then we are done. - blockLogInfo("#addStoredBlock: " + block + " on " + blockLog.info("BLOCK* addStoredBlock: " + block + " on " + node + " size " + block.getNumBytes() + " but it does not belong to any file"); // we could add this block to invalidate set of this datanode. 
@@ -2050,7 +2180,7 @@ Block addStoredBlock(final BlockInfo block, } } else { curReplicaDelta = 0; - blockLogWarn("#addStoredBlock: " + blockLog.warn("BLOCK* addStoredBlock: " + "Redundant addStoredBlock request received for " + storedBlock + " on " + node + " size " + storedBlock.getNumBytes()); } @@ -2108,6 +2238,20 @@ Block addStoredBlock(final BlockInfo block, return storedBlock; } + private void logAddStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) { + if (!blockLog.isInfoEnabled()) { + return; + } + + StringBuilder sb = new StringBuilder(500); + sb.append("BLOCK* addStoredBlock: blockMap updated: ") + .append(node) + .append(" is added to "); + storedBlock.appendStringTo(sb); + sb.append(" size " ) + .append(storedBlock.getNumBytes()); + blockLog.info(sb); + } /** * Invalidate corrupt replicas. *
@@ -2989,6 +3133,13 @@ public Iterator getCorruptReplicaBlockIterator() { UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS); } + /** + * Get the replicas which are corrupt for a given block. + */ + public Collection getCorruptReplicas(Block block) { + return corruptReplicas.getNodes(block); + } + /** @return the size of UnderReplicatedBlocks */ public int numOfUnderReplicatedBlocks() { return neededReplications.size(); @@ -3129,21 +3280,4 @@ enum MisReplicationResult { public void shutdown() { blocksMap.close(); } - - @Override // ReportProcessor - int moveBlockToHead(DatanodeDescriptor dn, BlockInfo storedBlock, - int curIndex, int headIndex) { - return dn.moveBlockToHead(storedBlock, curIndex, headIndex); - } - - @Override // ReportProcessor - boolean addBlock(DatanodeDescriptor dn, BlockInfo block) { - return dn.addBlock(block); - } - - @Override // ReportProcessor - boolean removeBlock(DatanodeDescriptor dn, BlockInfo block) { - return dn.removeBlock(block); - } - } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationManager.java deleted file mode 100644 index 6481d026ac3..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationManager.java +++ /dev/null @@ -1,607 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdfs.server.blockmanagement; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; -import org.apache.hadoop.hdfs.protocol.DatanodeID; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; -import org.apache.hadoop.hdfs.server.namenode.FSClusterStats; -import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.hdfs.server.namenode.Namesystem; -import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; -import org.apache.hadoop.hdfs.util.LightWeightHashSet; -import org.apache.hadoop.util.Time; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; - -/** - * Analogue of the BlockManager class for cached replicas. Maintains the mapping - * of cached blocks to datanodes via processing datanode cache reports. Based on - * these reports and addition and removal of caching directives in the - * CacheManager, the CacheReplicationManager will schedule caching and uncaching - * work. - * - * The CacheReplicationManager does not have a separate lock, so depends on - * taking the namesystem lock as appropriate. 
- */ -@InterfaceAudience.LimitedPrivate({"HDFS"}) -public class CacheReplicationManager extends ReportProcessor { - - private static final Log LOG = - LogFactory.getLog(CacheReplicationManager.class); - - // Statistics - private volatile long pendingCacheBlocksCount = 0L; - private volatile long underCachedBlocksCount = 0L; - private volatile long scheduledCacheBlocksCount = 0L; - - /** Used by metrics */ - public long getPendingCacheBlocksCount() { - return pendingCacheBlocksCount; - } - /** Used by metrics */ - public long getUnderCachedBlocksCount() { - return underCachedBlocksCount; - } - /** Used by metrics */ - public long getScheduledCacheBlocksCount() { - return scheduledCacheBlocksCount; - } - /** Used by metrics */ - public long getPendingBlocksToUncacheCount() { - return blocksToUncache.numBlocks(); - } - - private final Namesystem namesystem; - private final BlockManager blockManager; - private final DatanodeManager datanodeManager; - private final boolean isCachingEnabled; - - /** - * Mapping of blocks to datanodes where the block is cached - */ - final BlocksMap cachedBlocksMap; - /** - * Blocks to be uncached - */ - private final UncacheBlocks blocksToUncache; - /** - * Blocks that need to be cached - */ - private final LightWeightHashSet neededCacheBlocks; - /** - * Blocks that are being cached - */ - private final PendingReplicationBlocks pendingCacheBlocks; - - /** - * Executor for the CacheReplicationMonitor thread - */ - private ExecutorService monitor = null; - - private final Configuration conf; - - public CacheReplicationManager(final Namesystem namesystem, - final BlockManager blockManager, final DatanodeManager datanodeManager, - final FSClusterStats stats, final Configuration conf) throws IOException { - super(conf); - this.namesystem = namesystem; - this.blockManager = blockManager; - this.datanodeManager = datanodeManager; - this.conf = conf; - isCachingEnabled = conf.getBoolean( - DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY, - DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_DEFAULT); - if (isCachingEnabled) { - cachedBlocksMap = new BlocksMap(BlockManager.DEFAULT_MAP_LOAD_FACTOR); - blocksToUncache = new UncacheBlocks(); - pendingCacheBlocks = new PendingReplicationBlocks(1000 * conf.getInt( - DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, - DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT)); - neededCacheBlocks = new LightWeightHashSet(); - } else { - cachedBlocksMap = null; - blocksToUncache = null; - pendingCacheBlocks = null; - neededCacheBlocks = null; - } - } - - public void activate() { - if (isCachingEnabled) { - pendingCacheBlocks.start(); - this.monitor = Executors.newSingleThreadExecutor( - new ThreadFactoryBuilder() - .setDaemon(true) - .setNameFormat(CacheReplicationMonitor.class.toString()) - .build()); - monitor.submit(new CacheReplicationMonitor(namesystem, blockManager, - datanodeManager, this, blocksToUncache, neededCacheBlocks, - pendingCacheBlocks, conf)); - monitor.shutdown(); - } - } - - public void close() { - if (isCachingEnabled) { - monitor.shutdownNow(); - try { - monitor.awaitTermination(3000, TimeUnit.MILLISECONDS); - } catch (InterruptedException e) { - } - pendingCacheBlocks.stop(); - cachedBlocksMap.close(); - } - } - - public void clearQueues() { - if (isCachingEnabled) { - blocksToUncache.clear(); - synchronized (neededCacheBlocks) { - neededCacheBlocks.clear(); - } - pendingCacheBlocks.clear(); - } - } - - public boolean isCachingEnabled() { - return isCachingEnabled; - } - - /** - * @return 
desired cache replication factor of the block - */ - short getCacheReplication(Block block) { - final BlockCollection bc = blockManager.blocksMap.getBlockCollection(block); - return bc == null ? 0 : bc.getCacheReplication(); - } - - public void setCachedLocations(LocatedBlock block) { - BlockInfo blockInfo = cachedBlocksMap.getStoredBlock( - block.getBlock().getLocalBlock()); - for (int i=0; i it = cachedBlocksMap.nodeIterator(block); - short numCached = 0; - while (it.hasNext()) { - it.next(); - numCached++; - } - return numCached; - } - - /** - * The given datanode is reporting all of its cached blocks. - * Update the cache state of blocks in the block map. - */ - public void processCacheReport(final DatanodeID nodeID, final String poolId, - final BlockListAsLongs newReport) throws IOException { - if (!isCachingEnabled) { - String error = "cacheReport received from datanode " + nodeID - + " but caching is disabled on the namenode (" - + DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY + ")"; - LOG.warn(error + ", ignoring"); - throw new IOException(error); - } - namesystem.writeLock(); - final long startTime = Time.now(); //after acquiring write lock - final long endTime; - try { - final DatanodeDescriptor node = datanodeManager.getDatanode(nodeID); - if (node == null || !node.isAlive) { - throw new IOException( - "processCacheReport from dead or unregistered node: " + nodeID); - } - - // TODO: do an optimized initial cache report while in startup safemode - if (namesystem.isInStartupSafeMode()) { - blockLogInfo("#processCacheReport: " - + "discarded cache report from " + nodeID - + " because namenode still in startup phase"); - return; - } - - processReport(node, newReport); - - // TODO: process postponed blocks reported while a standby - //rescanPostponedMisreplicatedBlocks(); - } finally { - endTime = Time.now(); - namesystem.writeUnlock(); - } - - // Log the block report processing stats from Namenode perspective - final NameNodeMetrics metrics = NameNode.getNameNodeMetrics(); - if (metrics != null) { - metrics.addCacheBlockReport((int) (endTime - startTime)); - } - blockLogInfo("#processCacheReport: from " - + nodeID + ", blocks: " + newReport.getNumberOfBlocks() - + ", processing time: " + (endTime - startTime) + " msecs"); - } - - @Override // ReportProcessor - void markBlockAsCorrupt(BlockToMarkCorrupt b, DatanodeInfo dn) - throws IOException { - throw new UnsupportedOperationException("Corrupt blocks should not be in" - + " the cache report"); - } - - @Override // ReportProcessor - void addToInvalidates(final Block b, final DatanodeInfo node) { - blocksToUncache.add(b, node, true); - } - - @Override // ReportProcessor - void addStoredBlockUnderConstruction( - BlockInfoUnderConstruction storedBlock, DatanodeDescriptor node, - ReplicaState reportedState) { - throw new UnsupportedOperationException("Under-construction blocks" - + " should not be in the cache report"); - } - - @Override // ReportProcessor - int moveBlockToHead(DatanodeDescriptor dn, BlockInfo storedBlock, - int curIndex, int headIndex) { - return dn.moveCachedBlockToHead(storedBlock, curIndex, headIndex); - } - - @Override // ReportProcessor - boolean addBlock(DatanodeDescriptor dn, BlockInfo block) { - return dn.addCachedBlock(block); - } - - @Override // ReportProcessor - boolean removeBlock(DatanodeDescriptor dn, BlockInfo block) { - return dn.removeCachedBlock(block); - } - - /** - * Similar to processReportedBlock. Simpler since it doesn't need to worry - * about under construction and corrupt replicas. 
- * - * @return Updated BlockInfo for the block if it should be kept, null if - * it is to be invalidated. - */ - @Override // ReportProcessor - BlockInfo processReportedBlock(final DatanodeDescriptor dn, - final Block block, final ReplicaState reportedState, - final Collection toAdd, - final Collection toInvalidate, - Collection toCorrupt, - Collection toUC) { - - if (LOG.isDebugEnabled()) { - LOG.debug("Reported cached block " + block - + " on " + dn + " size " + block.getNumBytes() - + " replicaState = " + reportedState); - } - - final boolean shouldPostponeBlocksFromFuture = - blockManager.shouldPostponeBlocksFromFuture(); - if (shouldPostponeBlocksFromFuture && - namesystem.isGenStampInFuture(block)) { - // TODO: queuing cache operations on the standby - if (LOG.isTraceEnabled()) { - LOG.trace("processReportedBlock: block " + block + " has a " - + "genstamp from the future and namenode is in standby mode," - + " ignoring"); - } - return null; - } - - BlockInfo storedBlock = blockManager.blocksMap.getStoredBlock(block); - if (storedBlock == null) { - // If blocksMap does not contain reported block id, - // the BlockManager will take care of invalidating it, and the datanode - // will automatically uncache at that point. - if (LOG.isTraceEnabled()) { - LOG.trace("processReportedBlock: block " + block + " not found " - + "in blocksMap, ignoring"); - } - return null; - } - - BlockUCState ucState = storedBlock.getBlockUCState(); - - // Datanodes currently only will cache completed replicas. - // Let's just invalidate anything that's not completed and the right - // genstamp and number of bytes. - if (!ucState.equals(BlockUCState.COMPLETE) || - block.getGenerationStamp() != storedBlock.getGenerationStamp() || - block.getNumBytes() != storedBlock.getNumBytes()) { - if (shouldPostponeBlocksFromFuture) { - // TODO: queuing cache operations on the standby - if (LOG.isTraceEnabled()) { - LOG.trace("processReportedBlock: block " + block + " has a " - + "mismatching genstamp or length and namenode is in standby" - + " mode, ignoring"); - } - return null; - } else { - toInvalidate.add(block); - if (LOG.isTraceEnabled()) { - LOG.trace("processReportedBlock: block " + block + " scheduled" - + " for uncaching because it is misreplicated" - + " or under construction."); - } - return null; - } - } - - // It's a keeper - - // Could be present in blocksMap and not in cachedBlocksMap, add it - BlockInfo cachedBlock = cachedBlocksMap.getStoredBlock(block); - if (cachedBlock == null) { - cachedBlock = new BlockInfo(block, 0); - cachedBlocksMap.addBlockCollection(cachedBlock, - storedBlock.getBlockCollection()); - } - - if (LOG.isDebugEnabled()) { - LOG.debug("In memory blockUCState = " + ucState); - } - - // Ignore replicas that are already scheduled for removal - if (blocksToUncache.contains(dn.getStorageID(), block)) { - if (LOG.isTraceEnabled()) { - LOG.trace("processReportedBlock: block " + block + " is already" - + " scheduled to be uncached, not adding it to the cachedBlocksMap"); - } - return cachedBlock; - } - - // add replica if not already present in the cached block map - if (reportedState == ReplicaState.FINALIZED - && cachedBlock.findDatanode(dn) < 0) { - toAdd.add(cachedBlock); - } - if (LOG.isTraceEnabled()) { - LOG.trace("processReportedBlock: block " + block + " scheduled" - + " to be added to cachedBlocksMap"); - } - return cachedBlock; - } - - /** - * Modify (cached block-->datanode) map with a newly cached block. 
Remove - * block from set of needed cache replications if this takes care of the - * problem. - * - * @return the block that is stored in cachedBlockMap. - */ - @Override // ReportProcessor - Block addStoredBlock(final BlockInfo block, DatanodeDescriptor node, - DatanodeDescriptor delNodeHint, boolean logEveryBlock) throws IOException { - assert block != null && namesystem.hasWriteLock(); - BlockInfo cachedBlock = block; - if (cachedBlock == null || cachedBlock.getBlockCollection() == null) { - // If this block does not belong to anyfile, then we are done. - blockLogInfo("#addStoredBlock: " + block + " on " - + node + " size " + block.getNumBytes() - + " but it does not belong to any file"); - // we could add this block to invalidate set of this datanode. - // it will happen in next block report otherwise. - return block; - } - - BlockCollection bc = cachedBlock.getBlockCollection(); - - // add block to the datanode - boolean added = node.addCachedBlock(cachedBlock); - - int curReplicaDelta; - if (added) { - curReplicaDelta = 1; - if (logEveryBlock) { - logAddStoredBlock(cachedBlock, node); - } - } else { - curReplicaDelta = 0; - blockLogWarn("#addStoredBlock: " - + "Redundant addCachedBlock request received for " + cachedBlock - + " on " + node + " size " + cachedBlock.getNumBytes()); - } - - // Remove it from pending list if present - pendingCacheBlocks.decrement(block, node); - - // Now check for completion of blocks and safe block count - int numCachedReplicas = getNumCached(cachedBlock); - int numEffectiveCachedReplica = numCachedReplicas - + pendingCacheBlocks.getNumReplicas(cachedBlock); - - // if file is under construction, then done for now - if (bc instanceof MutableBlockCollection) { - return cachedBlock; - } - - // do not try to handle over/under-replicated blocks during first safe mode - if (!namesystem.isPopulatingReplQueues()) { - return cachedBlock; - } - - // Under-replicated - short cacheReplication = bc.getCacheReplication(); - if (numEffectiveCachedReplica >= cacheReplication) { - synchronized (neededCacheBlocks) { - neededCacheBlocks.remove(cachedBlock); - } - } else { - updateNeededCaching(cachedBlock, curReplicaDelta, 0); - } - - // Over-replicated, we don't need this new replica - if (numEffectiveCachedReplica > cacheReplication) { - blocksToUncache.add(cachedBlock, node, true); - } - - return cachedBlock; - } - - /** - * Modify (cached block-->datanode) map. Possibly generate replication tasks, - * if the removed block is still valid. - */ - @Override // ReportProcessor - void removeStoredBlock(Block block, DatanodeDescriptor node) { - blockLogDebug("#removeStoredBlock: " + block + " from " + node); - assert (namesystem.hasWriteLock()); - { - if (!cachedBlocksMap.removeNode(block, node)) { - blockLogDebug("#removeStoredBlock: " - + block + " has already been removed from node " + node); - return; - } - - // Prune the block from the map if it's the last cache replica - if (cachedBlocksMap.getStoredBlock(block).numNodes() == 0) { - cachedBlocksMap.removeBlock(block); - } - - // - // It's possible that the block was removed because of a datanode - // failure. If the block is still valid, check if replication is - // necessary. In that case, put block on a possibly-will- - // be-replicated list. - // - BlockCollection bc = blockManager.blocksMap.getBlockCollection(block); - if (bc != null) { - updateNeededCaching(block, -1, 0); - } - } - } - - /** - * Reduce cache replication factor to the new replication by randomly - * choosing replicas to invalidate. 
- */ - private void processOverCachedBlock(final Block block, - final short replication) { - assert namesystem.hasWriteLock(); - List nodes = getSafeReplicas(cachedBlocksMap, block); - List targets = - CacheReplicationPolicy.chooseTargetsToUncache(nodes, replication); - for (DatanodeDescriptor dn: targets) { - blocksToUncache.add(block, dn, true); - } - } - - /** Set replication for the blocks. */ - public void setCacheReplication(final short oldRepl, final short newRepl, - final String src, final Block... blocks) { - if (!isCachingEnabled) { - LOG.warn("Attempted to set cache replication for " + src + " but caching" - + " is disabled (" + DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY - + "), ignoring"); - return; - } - if (newRepl == oldRepl) { - return; - } - - // update needReplication priority queues - for (Block b : blocks) { - updateNeededCaching(b, 0, newRepl-oldRepl); - } - - if (oldRepl > newRepl) { - // old replication > the new one; need to remove copies - LOG.info("Decreasing cache replication from " + oldRepl + " to " + newRepl - + " for " + src); - for (Block b : blocks) { - processOverCachedBlock(b, newRepl); - } - } else { // replication factor is increased - LOG.info("Increasing cache replication from " + oldRepl + " to " + newRepl - + " for " + src); - } - } - - /** updates a block in under replicated queue */ - private void updateNeededCaching(final Block block, - final int curReplicasDelta, int expectedReplicasDelta) { - namesystem.writeLock(); - try { - if (!namesystem.isPopulatingReplQueues()) { - return; - } - final int numCached = getNumCached(block); - final int curExpectedReplicas = getCacheReplication(block); - if (numCached < curExpectedReplicas) { - neededCacheBlocks.add(block); - } else { - synchronized (neededCacheBlocks) { - neededCacheBlocks.remove(block); - } - } - } finally { - namesystem.writeUnlock(); - } - } - - /** - * Return the safe replicas (not corrupt or decomissioning/decommissioned) of - * a block in a BlocksMap - */ - List getSafeReplicas(BlocksMap map, Block block) { - List nodes = new ArrayList(3); - Collection corrupted = - blockManager.corruptReplicas.getNodes(block); - Iterator it = map.nodeIterator(block); - while (it.hasNext()) { - DatanodeDescriptor dn = it.next(); - // Don't count a decommissioned or decommissioning nodes - if (dn.isDecommissioned() || dn.isDecommissionInProgress()) { - continue; - } - // Don't count a corrupted node - if (corrupted != null && corrupted.contains(dn)) { - continue; - } - nodes.add(dn); - } - return nodes; - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java index b6255460a25..5d9c39c16a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java @@ -19,284 +19,435 @@ import static org.apache.hadoop.util.ExitUtil.terminate; -import java.util.ArrayList; -import java.util.HashMap; +import java.io.Closeable; +import java.io.IOException; +import java.util.Collection; +import java.util.Iterator; +import java.util.LinkedList; import java.util.List; -import java.util.Map.Entry; +import java.util.Random; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import 
org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.hdfs.server.namenode.Namesystem; -import org.apache.hadoop.hdfs.util.LightWeightHashSet; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; +import org.apache.hadoop.hdfs.server.namenode.CacheManager; +import org.apache.hadoop.hdfs.server.namenode.CachedBlock; +import org.apache.hadoop.hdfs.server.namenode.FSDirectory; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.namenode.INode; +import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; +import org.apache.hadoop.hdfs.server.namenode.INodeFile; +import org.apache.hadoop.hdfs.util.ReadOnlyList; +import org.apache.hadoop.util.GSet; +import org.apache.hadoop.util.Time; /** - * Periodically computes new replication work. This consists of two tasks: - * - * 1) Assigning blocks in the neededCacheBlocks to datanodes where they will be - * cached. This moves them to the pendingCacheBlocks list. - * - * 2) Placing caching tasks in pendingCacheBlocks that have timed out - * back into neededCacheBlocks for reassignment. + * Scans the namesystem, scheduling blocks to be cached as appropriate. + * + * The CacheReplicationMonitor does a full scan when the NameNode first + * starts up, and at configurable intervals afterwards. */ @InterfaceAudience.LimitedPrivate({"HDFS"}) -class CacheReplicationMonitor implements Runnable { +public class CacheReplicationMonitor extends Thread implements Closeable { private static final Log LOG = LogFactory.getLog(CacheReplicationMonitor.class); - private static final Log blockLog = NameNode.blockStateChangeLog; + private final FSNamesystem namesystem; - private final Namesystem namesystem; private final BlockManager blockManager; - private final DatanodeManager datanodeManager; - private final CacheReplicationManager cacheReplManager; - private final UncacheBlocks blocksToUncache; - private final LightWeightHashSet neededCacheBlocks; - private final PendingReplicationBlocks pendingCacheBlocks; + private final CacheManager cacheManager; + + private final GSet cachedBlocks; /** - * Re-check period for computing cache replication work + * Pseudorandom number source */ - private final long cacheReplicationRecheckInterval; + private final Random random = new Random(); - public CacheReplicationMonitor(Namesystem namesystem, - BlockManager blockManager, DatanodeManager datanodeManager, - CacheReplicationManager cacheReplManager, - UncacheBlocks blocksToUncache, - LightWeightHashSet neededCacheBlocks, - PendingReplicationBlocks pendingCacheBlocks, - Configuration conf) { + /** + * The interval at which we scan the namesystem for caching changes. + */ + private final long intervalMs; + + /** + * True if we should rescan immediately, regardless of how much time + * elapsed since the previous scan. + */ + private boolean rescanImmediately; + + /** + * The monotonic time at which the current scan started. + */ + private long scanTimeMs; + + /** + * Mark status of the current scan. + */ + private boolean mark = false; + + /** + * True if this monitor should terminate. 
+ */ + private boolean shutdown; + + /** + * Cache directives found in the previous scan. + */ + private int scannedDirectives; + + /** + * Blocks found in the previous scan. + */ + private long scannedBlocks; + + public CacheReplicationMonitor(FSNamesystem namesystem, + CacheManager cacheManager, long intervalMs) { this.namesystem = namesystem; - this.blockManager = blockManager; - this.datanodeManager = datanodeManager; - this.cacheReplManager = cacheReplManager; - - this.blocksToUncache = blocksToUncache; - this.neededCacheBlocks = neededCacheBlocks; - this.pendingCacheBlocks = pendingCacheBlocks; - - this.cacheReplicationRecheckInterval = conf.getInt( - DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, - DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000L; + this.blockManager = namesystem.getBlockManager(); + this.cacheManager = cacheManager; + this.cachedBlocks = cacheManager.getCachedBlocks(); + this.intervalMs = intervalMs; } @Override public void run() { - LOG.info("CacheReplicationMonitor is starting"); - while (namesystem.isRunning()) { - try { - computeCachingWork(); - processPendingCachingWork(); - Thread.sleep(cacheReplicationRecheckInterval); - } catch (Throwable t) { - if (!namesystem.isRunning()) { - LOG.info("Stopping CacheReplicationMonitor."); - if (!(t instanceof InterruptedException)) { - LOG.info("CacheReplicationMonitor received an exception" - + " while shutting down.", t); - } - break; - } - LOG.fatal("ReplicationMonitor thread received Runtime exception. ", t); - terminate(1, t); - } - } - } - - /** - * Assigns under-cached blocks to new datanodes. - */ - private void computeCachingWork() { - List blocksToCache = null; - namesystem.writeLock(); + shutdown = false; + rescanImmediately = true; + scanTimeMs = 0; + LOG.info("Starting CacheReplicationMonitor with interval " + + intervalMs + " milliseconds"); try { - synchronized (neededCacheBlocks) { - blocksToCache = neededCacheBlocks.pollAll(); - } - } finally { - namesystem.writeUnlock(); - } - computeCachingWorkForBlocks(blocksToCache); - computeUncacheWork(); - } - - private void computeCachingWorkForBlocks(List blocksToCache) { - int requiredRepl, effectiveRepl, additionalRepl; - List cachedNodes, storedNodes, targets; - - final HashMap> work = - new HashMap>(); - namesystem.writeLock(); - try { - synchronized (neededCacheBlocks) { - for (Block block: blocksToCache) { - // Required number of cached replicas - requiredRepl = cacheReplManager.getCacheReplication(block); - // Replicas that are safely cached - cachedNodes = cacheReplManager.getSafeReplicas( - cacheReplManager.cachedBlocksMap, block); - // Replicas that are safely stored on disk - storedNodes = cacheReplManager.getSafeReplicas( - blockManager.blocksMap, block); - // "effective" replication factor which includes pending - // replication work - effectiveRepl = cachedNodes.size() - + pendingCacheBlocks.getNumReplicas(block); - if (effectiveRepl >= requiredRepl) { - neededCacheBlocks.remove(block); - blockLog.info("BLOCK* Removing " + block - + " from neededCacheBlocks as it has enough cached replicas"); - continue; - } - // Choose some replicas to cache if needed - additionalRepl = requiredRepl - effectiveRepl; - targets = new ArrayList(storedNodes.size()); - // Only target replicas that aren't already cached. 
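The rewritten run() loop above boils down to an "interval or kick" scheduler: the thread sleeps on the monitor object until either intervalMs has elapsed since the last scan or kick()/close() wakes it with notifyAll(). A stripped-down sketch of just that pattern, with illustrative names and System.nanoTime() standing in for Time.monotonicNow() (the real code also calls terminate() on fatal errors):

import java.io.Closeable;

public class RescanLoopSketch extends Thread implements Closeable {
  private final long intervalMs;
  private long lastScanMs;                 // monotonic time of the last scan
  private boolean rescanImmediately = true;
  private boolean shutdown;

  public RescanLoopSketch(long intervalMs) { this.intervalMs = intervalMs; }

  @Override
  public void run() {
    try {
      while (true) {
        synchronized (this) {
          while (true) {
            if (shutdown) return;
            if (rescanImmediately) { rescanImmediately = false; break; }
            long delta = (lastScanMs + intervalMs) - monotonicNowMs();
            if (delta <= 0) break;         // interval elapsed: time to rescan
            wait(delta);                   // sleep, but wake early on kick()/close()
          }
        }
        lastScanMs = monotonicNowMs();
        rescan();                          // placeholder for the real directive/block scan
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }

  /** Wake the thread up and force a scan now. */
  public synchronized void kick() { rescanImmediately = true; notifyAll(); }

  @Override
  public synchronized void close() { shutdown = true; notifyAll(); }

  private static long monotonicNowMs() { return System.nanoTime() / 1000000L; }
  private void rescan() { /* scan directives and the cached-block map here */ }
}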
- for (DatanodeDescriptor dn: storedNodes) { - if (!cachedNodes.contains(dn)) { - targets.add(dn); + long curTimeMs = Time.monotonicNow(); + while (true) { + synchronized(this) { + while (true) { + if (shutdown) { + LOG.info("Shutting down CacheReplicationMonitor"); + return; } - } - if (targets.size() < additionalRepl) { - if (LOG.isDebugEnabled()) { - LOG.debug("Block " + block + " cannot be cached on additional" - + " nodes because there are no more available datanodes" - + " with the block on disk."); + if (rescanImmediately) { + LOG.info("Rescanning on request"); + rescanImmediately = false; + break; } - } - targets = CacheReplicationPolicy.chooseTargetsToCache(block, targets, - additionalRepl); - if (targets.size() < additionalRepl) { - if (LOG.isDebugEnabled()) { - LOG.debug("Block " + block + " cannot be cached on additional" - + " nodes because there is not sufficient cache space on" - + " available target datanodes."); + long delta = (scanTimeMs + intervalMs) - curTimeMs; + if (delta <= 0) { + LOG.info("Rescanning after " + (curTimeMs - scanTimeMs) + + " milliseconds"); + break; } - } - // Continue if we couldn't get more cache targets - if (targets.size() == 0) { - continue; - } - - // Update datanodes and blocks that were scheduled for caching - work.put(block, targets); - // Schedule caching on the targets - for (DatanodeDescriptor target: targets) { - target.addBlockToBeCached(block); - } - // Add block to the pending queue - pendingCacheBlocks.increment(block, - targets.toArray(new DatanodeDescriptor[] {})); - if (blockLog.isDebugEnabled()) { - blockLog.debug("BLOCK* block " + block - + " is moved from neededCacheBlocks to pendingCacheBlocks"); - } - // Remove from needed queue if it will be fully replicated - if (effectiveRepl + targets.size() >= requiredRepl) { - neededCacheBlocks.remove(block); + this.wait(delta); + curTimeMs = Time.monotonicNow(); } } + scanTimeMs = curTimeMs; + mark = !mark; + rescan(); + curTimeMs = Time.monotonicNow(); + LOG.info("Scanned " + scannedDirectives + " directive(s) and " + + scannedBlocks + " block(s) in " + (curTimeMs - scanTimeMs) + " " + + "millisecond(s)."); } - } finally { - namesystem.writeUnlock(); - } - - if (blockLog.isInfoEnabled()) { - // log which blocks have been scheduled for replication - for (Entry> item : work.entrySet()) { - Block block = item.getKey(); - List nodes = item.getValue(); - StringBuilder targetList = new StringBuilder("datanode(s)"); - for (DatanodeDescriptor node: nodes) { - targetList.append(' '); - targetList.append(node); - } - blockLog.info("BLOCK* ask " + targetList + " to cache " + block); - } - } - - if (blockLog.isDebugEnabled()) { - blockLog.debug( - "BLOCK* neededCacheBlocks = " + neededCacheBlocks.size() - + " pendingCacheBlocks = " + pendingCacheBlocks.size()); + } catch (Throwable t) { + LOG.fatal("Thread exiting", t); + terminate(1, t); } } /** - * Reassign pending caching work that has timed out - */ - private void processPendingCachingWork() { - Block[] timedOutItems = pendingCacheBlocks.getTimedOutBlocks(); - if (timedOutItems != null) { - namesystem.writeLock(); - try { - for (int i = 0; i < timedOutItems.length; i++) { - Block block = timedOutItems[i]; - final short numCached = cacheReplManager.getNumCached(block); - final short cacheReplication = - cacheReplManager.getCacheReplication(block); - // Needs to be cached if under-replicated - if (numCached < cacheReplication) { - synchronized (neededCacheBlocks) { - neededCacheBlocks.add(block); - } - } - } - } finally { - 
namesystem.writeUnlock(); - } - } - } - - /** - * Schedule blocks for uncaching at datanodes - * @return total number of block for deletion - */ - int computeUncacheWork() { - final List nodes = blocksToUncache.getStorageIDs(); - int blockCnt = 0; - for (String node: nodes) { - blockCnt += uncachingWorkForOneNode(node); - } - return blockCnt; - } - - /** - * Gets the list of blocks scheduled for uncaching at a datanode and - * schedules them for uncaching. + * Kick the monitor thread. * - * @return number of blocks scheduled for removal + * If it is sleeping, it will wake up and start scanning. + * If it is currently scanning, it will finish the scan and immediately do + * another one. */ - private int uncachingWorkForOneNode(String nodeId) { - final List toInvalidate; - final DatanodeDescriptor dn; + public synchronized void kick() { + rescanImmediately = true; + this.notifyAll(); + } + /** + * Shut down and join the monitor thread. + */ + @Override + public void close() throws IOException { + synchronized(this) { + if (shutdown) return; + shutdown = true; + this.notifyAll(); + } + try { + if (this.isAlive()) { + this.join(60000); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + + private void rescan() { + scannedDirectives = 0; + scannedBlocks = 0; namesystem.writeLock(); try { - // get blocks to invalidate for the nodeId - assert nodeId != null; - dn = datanodeManager.getDatanode(nodeId); - if (dn == null) { - blocksToUncache.remove(nodeId); - return 0; - } - toInvalidate = blocksToUncache.invalidateWork(nodeId, dn); - if (toInvalidate == null) { - return 0; - } + rescanPathBasedCacheEntries(); } finally { namesystem.writeUnlock(); } - if (blockLog.isInfoEnabled()) { - blockLog.info("BLOCK* " + getClass().getSimpleName() - + ": ask " + dn + " to uncache " + toInvalidate); + namesystem.writeLock(); + try { + rescanCachedBlockMap(); + } finally { + namesystem.writeUnlock(); + } + } + + /** + * Scan all PathBasedCacheEntries. Use the information to figure out + * what cache replication factor each block should have. + * + * @param mark Whether the current scan is setting or clearing the mark + */ + private void rescanPathBasedCacheEntries() { + FSDirectory fsDir = namesystem.getFSDirectory(); + for (PathBasedCacheEntry pce : cacheManager.getEntriesById().values()) { + scannedDirectives++; + String path = pce.getPath(); + INode node; + try { + node = fsDir.getINode(path); + } catch (UnresolvedLinkException e) { + // We don't cache through symlinks + continue; + } + if (node == null) { + if (LOG.isDebugEnabled()) { + LOG.debug("No inode found at " + path); + } + } else if (node.isDirectory()) { + INodeDirectory dir = node.asDirectory(); + ReadOnlyList children = dir.getChildrenList(null); + for (INode child : children) { + if (child.isFile()) { + rescanFile(pce, child.asFile()); + } + } + } else if (node.isFile()) { + rescanFile(pce, node.asFile()); + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("Ignoring non-directory, non-file inode " + node + + " found at " + path); + } + } + } + } + + /** + * Apply a PathBasedCacheEntry to a file. + * + * @param pce The PathBasedCacheEntry to apply. + * @param file The file. + */ + private void rescanFile(PathBasedCacheEntry pce, INodeFile file) { + BlockInfo[] blockInfos = file.getBlocks(); + for (BlockInfo blockInfo : blockInfos) { + if (!blockInfo.getBlockUCState().equals(BlockUCState.COMPLETE)) { + // We don't try to cache blocks that are under construction. 
+ continue; + } + Block block = new Block(blockInfo.getBlockId()); + CachedBlock ncblock = new CachedBlock(block.getBlockId(), + pce.getReplication(), mark); + CachedBlock ocblock = cachedBlocks.get(ncblock); + if (ocblock == null) { + cachedBlocks.put(ncblock); + } else { + if (mark != ocblock.getMark()) { + // Mark hasn't been set in this scan, so update replication and mark. + ocblock.setReplicationAndMark(pce.getReplication(), mark); + } else { + // Mark already set in this scan. Set replication to highest value in + // any PathBasedCacheEntry that covers this file. + ocblock.setReplicationAndMark((short)Math.max( + pce.getReplication(), ocblock.getReplication()), mark); + } + } + } + } + + /** + * Scan through the cached block map. + * Any blocks which are under-replicated should be assigned new Datanodes. + * Blocks that are over-replicated should be removed from Datanodes. + */ + private void rescanCachedBlockMap() { + for (Iterator cbIter = cachedBlocks.iterator(); + cbIter.hasNext(); ) { + scannedBlocks++; + CachedBlock cblock = cbIter.next(); + List pendingCached = + cblock.getDatanodes(Type.PENDING_CACHED); + List cached = + cblock.getDatanodes(Type.CACHED); + List pendingUncached = + cblock.getDatanodes(Type.PENDING_UNCACHED); + // Remove nodes from PENDING_UNCACHED if they were actually uncached. + for (Iterator iter = pendingUncached.iterator(); + iter.hasNext(); ) { + DatanodeDescriptor datanode = iter.next(); + if (!cblock.isInList(datanode.getCached())) { + datanode.getPendingUncached().remove(cblock); + iter.remove(); + } + } + // If the block's mark doesn't match with the mark of this scan, that + // means that this block couldn't be reached during this scan. That means + // it doesn't need to be cached any more. + int neededCached = (cblock.getMark() != mark) ? + 0 : cblock.getReplication(); + int numCached = cached.size(); + if (numCached >= neededCached) { + // If we have enough replicas, drop all pending cached. + for (DatanodeDescriptor datanode : pendingCached) { + datanode.getPendingCached().remove(cblock); + } + pendingCached.clear(); + } + if (numCached < neededCached) { + // If we don't have enough replicas, drop all pending uncached. + for (DatanodeDescriptor datanode : pendingUncached) { + datanode.getPendingUncached().remove(cblock); + } + pendingUncached.clear(); + } + int neededUncached = numCached - + (pendingUncached.size() + neededCached); + if (neededUncached > 0) { + addNewPendingUncached(neededUncached, cblock, cached, + pendingUncached); + } else { + int additionalCachedNeeded = neededCached - + (numCached + pendingCached.size()); + if (additionalCachedNeeded > 0) { + addNewPendingCached(additionalCachedNeeded, cblock, cached, + pendingCached); + } + } + if ((neededCached == 0) && + pendingUncached.isEmpty() && + pendingCached.isEmpty()) { + // we have nothing more to do with this block. + cbIter.remove(); + } + } + } + + /** + * Add new entries to the PendingUncached list. + * + * @param neededUncached The number of replicas that need to be uncached. + * @param cachedBlock The block which needs to be uncached. + * @param cached A list of DataNodes currently caching the block. + * @param pendingUncached A list of DataNodes that will soon uncache the + * block. + */ + private void addNewPendingUncached(int neededUncached, + CachedBlock cachedBlock, List cached, + List pendingUncached) { + if (!cacheManager.isActive()) { + return; + } + // Figure out which replicas can be uncached. 
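The mark flag used by rescanFile() above is a small mark-and-sweep trick: every scan flips the expected mark, stamps each block that is still covered by some directive (keeping the highest requested replication when several directives cover it), and rescanCachedBlockMap() then treats any block left with the stale mark as no longer needing to be cached. A compact sketch of that bookkeeping, with a hypothetical Entry class and long block ids in place of CachedBlock and PathBasedCacheEntry:

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class MarkSweepSketch {
  static class Entry {
    short replication;
    boolean mark;
  }

  private final Map<Long, Entry> cachedBlocks = new HashMap<Long, Entry>();
  private boolean mark = false;

  /** One scan: directives maps block id to the replication its directive asks for. */
  void rescan(Map<Long, Short> directives) {
    mark = !mark;                                  // flip the expected mark for this scan
    for (Map.Entry<Long, Short> d : directives.entrySet()) {
      Entry e = cachedBlocks.get(d.getKey());
      if (e == null) {
        e = new Entry();
        e.replication = d.getValue();
        cachedBlocks.put(d.getKey(), e);
      } else if (e.mark != mark) {
        e.replication = d.getValue();              // first directive to reach it this scan
      } else {
        e.replication = (short) Math.max(e.replication, d.getValue());  // keep the max
      }
      e.mark = mark;                               // stamp it as still wanted
    }
    // Sweep: anything that kept the stale mark is no longer covered by a directive.
    // (The real code instead treats such blocks as neededCached == 0 and lets the
    // cached-block map scan retire them.)
    for (Iterator<Entry> it = cachedBlocks.values().iterator(); it.hasNext(); ) {
      if (it.next().mark != mark) {
        it.remove();
      }
    }
  }
}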
+ LinkedList possibilities = + new LinkedList(); + for (DatanodeDescriptor datanode : cached) { + if (!pendingUncached.contains(datanode)) { + possibilities.add(datanode); + } + } + while (neededUncached > 0) { + if (possibilities.isEmpty()) { + LOG.warn("Logic error: we're trying to uncache more replicas than " + + "actually exist for " + cachedBlock); + return; + } + DatanodeDescriptor datanode = + possibilities.remove(random.nextInt(possibilities.size())); + pendingUncached.add(datanode); + boolean added = datanode.getPendingUncached().add(cachedBlock); + assert added; + neededUncached--; + } + } + + /** + * Add new entries to the PendingCached list. + * + * @param neededCached The number of replicas that need to be cached. + * @param cachedBlock The block which needs to be cached. + * @param cached A list of DataNodes currently caching the block. + * @param pendingCached A list of DataNodes that will soon cache the + * block. + */ + private void addNewPendingCached(int neededCached, + CachedBlock cachedBlock, List cached, + List pendingCached) { + if (!cacheManager.isActive()) { + return; + } + // To figure out which replicas can be cached, we consult the + // blocksMap. We don't want to try to cache a corrupt replica, though. + BlockInfo blockInfo = blockManager. + getStoredBlock(new Block(cachedBlock.getBlockId())); + if (blockInfo == null) { + LOG.debug("Not caching block " + cachedBlock + " because it " + + "was deleted from all DataNodes."); + return; + } + if (!blockInfo.isComplete()) { + if (LOG.isDebugEnabled()) { + LOG.debug("Not caching block " + cachedBlock + " because it " + + "is not yet complete."); + } + return; + } + List possibilities = new LinkedList(); + int numReplicas = blockInfo.getCapacity(); + Collection corrupt = + blockManager.getCorruptReplicas(blockInfo); + for (int i = 0; i < numReplicas; i++) { + DatanodeDescriptor datanode = blockInfo.getDatanode(i); + if ((datanode != null) && + ((!pendingCached.contains(datanode)) && + ((corrupt == null) || (!corrupt.contains(datanode))))) { + possibilities.add(datanode); + } + } + while (neededCached > 0) { + if (possibilities.isEmpty()) { + LOG.warn("We need " + neededCached + " more replica(s) than " + + "actually exist to provide a cache replication of " + + cachedBlock.getReplication() + " for " + cachedBlock); + return; + } + DatanodeDescriptor datanode = + possibilities.remove(random.nextInt(possibilities.size())); + if (LOG.isDebugEnabled()) { + LOG.debug("AddNewPendingCached: datanode " + datanode + + " will now cache block " + cachedBlock); + } + pendingCached.add(datanode); + boolean added = datanode.getPendingCached().add(cachedBlock); + assert added; + neededCached--; } - return toInvalidate.size(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationPolicy.java deleted file mode 100644 index 3bd19331ea3..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationPolicy.java +++ /dev/null @@ -1,129 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.server.blockmanagement; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map.Entry; -import java.util.TreeMap; - -import org.apache.commons.math.random.RandomData; -import org.apache.commons.math.random.RandomDataImpl; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.protocol.Block; - -/** - * Helper class used by the CacheReplicationManager and CacheReplicationMonitor - * to select datanodes where blocks should be cached or uncached. - */ -@InterfaceAudience.LimitedPrivate({"HDFS"}) -public class CacheReplicationPolicy { - - // Not thread-safe, but only accessed by the CacheReplicationMonitor - private static RandomData random = new RandomDataImpl(); - - /** - * @return List of datanodes with sufficient capacity to cache the block - */ - private static List selectSufficientCapacity(Block block, - List targets) { - List sufficient = - new ArrayList(targets.size()); - for (DatanodeDescriptor dn: targets) { - long remaining = dn.getCacheRemaining(); - if (remaining >= block.getNumBytes()) { - sufficient.add(dn); - } - } - return sufficient; - } - - /** - * Returns a random datanode from targets, weighted by the amount of free - * cache capacity on the datanode. - * - * @param block Block to be cached - * @param targets List of potential cache targets - * @return a random DN, or null if no datanodes are available or have enough - * cache capacity. - */ - private static DatanodeDescriptor randomDatanodeByRemainingCache(Block block, - List targets) { - // Hold a lottery biased by the amount of free space to decide - // who gets the block - Collections.shuffle(targets); - TreeMap lottery = - new TreeMap(); - long totalCacheAvailable = 0; - for (DatanodeDescriptor dn: targets) { - long remaining = dn.getCacheRemaining(); - totalCacheAvailable += remaining; - lottery.put(totalCacheAvailable, dn); - } - // Pick our lottery winner - long winningTicket = random.nextLong(0, totalCacheAvailable - 1); - Entry winner = lottery.higherEntry(winningTicket); - return winner.getValue(); - } - - /** - * Chooses numTargets new cache replicas for a block from a list of targets. - * Will return fewer targets than requested if not enough nodes are available. - * - * @return List of target datanodes - */ - static List chooseTargetsToCache(Block block, - List targets, int numTargets) { - List sufficient = - selectSufficientCapacity(block, targets); - List chosen = - new ArrayList(numTargets); - for (int i = 0; i < numTargets && !sufficient.isEmpty(); i++) { - DatanodeDescriptor choice = - randomDatanodeByRemainingCache(block, sufficient); - chosen.add(choice); - sufficient.remove(choice); - } - return chosen; - } - - /** - * Given a list cache replicas where a block is cached, choose replicas to - * uncache to drop the cache replication factor down to replication. 
- * - * @param nodes list of datanodes where the block is currently cached - * @param replication desired replication factor - * @return List of datanodes to uncache - */ - public static List chooseTargetsToUncache( - List nodes, short replication) { - final int effectiveReplication = nodes.size(); - List targets = - new ArrayList(effectiveReplication); - Collections.shuffle(nodes); - final int additionalTargetsNeeded = effectiveReplication - replication; - int chosen = 0; - while (chosen < additionalTargetsNeeded && !nodes.isEmpty()) { - targets.add(nodes.get(chosen)); - chosen++; - } - return targets; - } - -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java index 4fd06d3cd16..7c0eb79b0b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java @@ -28,7 +28,9 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.server.namenode.CachedBlock; import org.apache.hadoop.hdfs.util.LightWeightHashSet; +import org.apache.hadoop.util.IntrusiveCollection; import org.apache.hadoop.util.Time; import com.google.common.annotations.VisibleForTesting; @@ -97,6 +99,65 @@ synchronized void clear() { } } + /** + * A list of CachedBlock objects on this datanode. + */ + public static class CachedBlocksList extends IntrusiveCollection { + public enum Type { + PENDING_CACHED, + CACHED, + PENDING_UNCACHED + } + + private final DatanodeDescriptor datanode; + + private final Type type; + + CachedBlocksList(DatanodeDescriptor datanode, Type type) { + this.datanode = datanode; + this.type = type; + } + + public DatanodeDescriptor getDatanode() { + return datanode; + } + + public Type getType() { + return type; + } + } + + /** + * The blocks which we want to cache on this DataNode. + */ + private final CachedBlocksList pendingCached = + new CachedBlocksList(this, CachedBlocksList.Type.PENDING_CACHED); + + /** + * The blocks which we know are cached on this datanode. + * This list is updated by periodic cache reports. + */ + private final CachedBlocksList cached = + new CachedBlocksList(this, CachedBlocksList.Type.CACHED); + + /** + * The blocks which we want to uncache on this DataNode. 
+ */ + private final CachedBlocksList pendingUncached = + new CachedBlocksList(this, CachedBlocksList.Type.PENDING_UNCACHED); + + public CachedBlocksList getPendingCached() { + return pendingCached; + } + + public CachedBlocksList getCached() { + return cached; + } + + public CachedBlocksList getPendingUncached() { + return pendingUncached; + } + /** * Head of the list of blocks on the datanode */ @@ -106,15 +167,6 @@ synchronized void clear() { */ private int numBlocks = 0; - /** - * Head of the list of cached blocks on the datanode - */ - private volatile BlockInfo cachedBlockList = null; - /** - * Number of cached blocks on the datanode - */ - private int numCachedBlocks = 0; - // isAlive == heartbeats.contains(this) // This is an optimization, because contains takes O(n) time on Arraylist public boolean isAlive = false; @@ -154,12 +206,6 @@ synchronized void clear() { /** A set of blocks to be invalidated by this datanode */ private LightWeightHashSet invalidateBlocks = new LightWeightHashSet(); - /** A queue of blocks to be cached by this datanode */ - private BlockQueue cacheBlocks = new BlockQueue(); - /** A set of blocks to be uncached by this datanode */ - private LightWeightHashSet blocksToUncache = - new LightWeightHashSet(); - /* Variables for maintaining number of blocks scheduled to be written to * this datanode. This count is approximate and might be slightly bigger * in case of errors (e.g. datanode does not report if an error occurs @@ -286,43 +332,6 @@ int moveBlockToHead(BlockInfo b, int curIndex, int headIndex) { return curIndex; } - /** - * Add block to the list of cached blocks on the data-node. - * @return true if block was successfully added, false if already present - */ - public boolean addCachedBlock(BlockInfo b) { - if (!b.addNode(this)) - return false; - // add to the head of the data-node list - cachedBlockList = b.listInsert(cachedBlockList, this); - numCachedBlocks++; - return true; - } - - /** - * Remove block from the list of cached blocks on the data-node. - * @return true if block was successfully removed, false if not present - */ - public boolean removeCachedBlock(BlockInfo b) { - cachedBlockList = b.listRemove(cachedBlockList, this); - if (b.removeNode(this)) { - numCachedBlocks--; - return true; - } else { - return false; - } - } - - /** - * Move block to the head of the list of cached blocks on the data-node. - * @return the index of the head of the blockList - */ - int moveCachedBlockToHead(BlockInfo b, int curIndex, int headIndex) { - cachedBlockList = b.moveBlockToHead(cachedBlockList, this, curIndex, - headIndex); - return curIndex; - } - /** * Used for testing only * @return the head of the blockList @@ -332,11 +341,6 @@ protected BlockInfo getHead(){ return blockList; } - @VisibleForTesting - protected BlockInfo getCachedHead() { - return cachedBlockList; - } - /** * Replace specified old block with a new one in the DataNodeDescriptor. * @@ -359,10 +363,13 @@ public void resetBlocks() { setDfsUsed(0); setXceiverCount(0); this.blockList = null; - this.cachedBlockList = null; this.invalidateBlocks.clear(); - this.blocksToUncache.clear(); this.volumeFailures = 0; + // pendingCached, cached, and pendingUncached are protected by the + // FSN lock. 
+ this.pendingCached.clear(); + this.cached.clear(); + this.pendingUncached.clear(); } public void clearBlockQueues() { @@ -371,20 +378,17 @@ public void clearBlockQueues() { this.recoverBlocks.clear(); this.replicateBlocks.clear(); } - synchronized(blocksToUncache) { - this.blocksToUncache.clear(); - this.cacheBlocks.clear(); - } + // pendingCached, cached, and pendingUncached are protected by the + // FSN lock. + this.pendingCached.clear(); + this.cached.clear(); + this.pendingUncached.clear(); } public int numBlocks() { return numBlocks; } - public int numCachedBlocks() { - return numCachedBlocks; - } - /** * Updates stats from datanode heartbeat. */ @@ -438,10 +442,6 @@ public Iterator getBlockIterator() { return new BlockIterator(this.blockList, this); } - public Iterator getCachedBlockIterator() { - return new BlockIterator(this.cachedBlockList, this); - } - /** * Store block replication work. */ @@ -450,14 +450,6 @@ void addBlockToBeReplicated(Block block, DatanodeDescriptor[] targets) { replicateBlocks.offer(new BlockTargetPair(block, targets)); } - /** - * Store block caching work. - */ - void addBlockToBeCached(Block block) { - assert(block != null); - cacheBlocks.offer(block); - } - /** * Store block recovery work. */ @@ -482,18 +474,6 @@ void addBlocksToBeInvalidated(List blocklist) { } } - /** - * Store block uncaching work. - */ - void addBlocksToBeUncached(List blocklist) { - assert(blocklist != null && blocklist.size() > 0); - synchronized (blocksToUncache) { - for (Block blk : blocklist) { - blocksToUncache.add(blk); - } - } - } - /** * The number of work items that are pending to be replicated */ @@ -501,13 +481,6 @@ int getNumberOfBlocksToBeReplicated() { return replicateBlocks.size(); } - /** - * The number of pending cache work items - */ - int getNumberOfBlocksToBeCached() { - return cacheBlocks.size(); - } - /** * The number of block invalidation items that are pending to * be sent to the datanode @@ -518,23 +491,10 @@ int getNumberOfBlocksToBeInvalidated() { } } - /** - * The number of pending uncache work items - */ - int getNumberOfBlocksToBeUncached() { - synchronized (blocksToUncache) { - return blocksToUncache.size(); - } - } - public List getReplicationCommand(int maxTransfers) { return replicateBlocks.poll(maxTransfers); } - public List getCacheBlocks() { - return cacheBlocks.poll(cacheBlocks.size()); - } - public BlockInfoUnderConstruction[] getLeaseRecoveryCommand(int maxTransfers) { List blocks = recoverBlocks.poll(maxTransfers); if(blocks == null) @@ -553,17 +513,6 @@ public Block[] getInvalidateBlocks(int maxblocks) { } } - /** - * Remove up to the maximum number of blocks to be uncached - */ - public Block[] getInvalidateCacheBlocks() { - synchronized (blocksToUncache) { - Block[] deleteList = blocksToUncache.pollToArray( - new Block[blocksToUncache.size()]); - return deleteList.length == 0 ? null : deleteList; - } - } - /** * @return Approximate number of blocks currently scheduled to be written * to this datanode. 
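The DatanodeDescriptor changes above replace the old per-datanode cached-block list and uncache queue with three CachedBlocksLists: pendingCached (what the monitor wants cached), cached (what cache reports say is actually cached), and pendingUncached (what should be dropped). The DatanodeManager hunk below drains the two pending lists into heartbeat commands. A self-contained sketch of that drain step, using a hypothetical SimpleDatanode with plain lists of block ids instead of the intrusive CachedBlock lists:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class CacheCommandSketch {
  static class SimpleDatanode {
    final List<Long> pendingCached = new ArrayList<Long>();
    final List<Long> cached = new ArrayList<Long>();
    final List<Long> pendingUncached = new ArrayList<Long>();
  }

  /** Drain a pending list into an array of block ids, as getCacheCommand() does. */
  static long[] drainToCommand(List<Long> pending) {
    if (pending.isEmpty()) {
      return null;                         // nothing to tell this datanode
    }
    long[] blockIds = new long[pending.size()];
    int i = 0;
    for (Iterator<Long> it = pending.iterator(); it.hasNext(); ) {
      blockIds[i++] = it.next();
      it.remove();                         // the command is only sent once
    }
    return blockIds;
  }

  public static void main(String[] args) {
    SimpleDatanode dn = new SimpleDatanode();
    dn.pendingCached.add(1001L);
    dn.pendingCached.add(1002L);
    long[] cmd = drainToCommand(dn.pendingCached);   // would become a DNA_CACHE command
    System.out.println(cmd.length + " block(s) to cache, "
        + dn.pendingCached.size() + " left pending"); // 2 block(s) to cache, 0 left pending
  }
}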
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 006184a9ac2..00672acff50 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -49,6 +49,8 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList; +import org.apache.hadoop.hdfs.server.namenode.CachedBlock; import org.apache.hadoop.hdfs.server.namenode.HostFileManager; import org.apache.hadoop.hdfs.server.namenode.HostFileManager.Entry; import org.apache.hadoop.hdfs.server.namenode.HostFileManager.EntrySet; @@ -76,6 +78,7 @@ import org.apache.hadoop.net.NodeBase; import org.apache.hadoop.net.ScriptBasedMapping; import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.IntrusiveCollection; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Time; @@ -167,6 +170,12 @@ public class DatanodeManager { */ private boolean hasClusterEverBeenMultiRack = false; + /** + * Whether we should tell datanodes what to cache in replies to + * heartbeat messages. + */ + private boolean sendCachingCommands = false; + /** * The number of datanodes for each software version. This list should change * during rolling upgrades. @@ -1305,26 +1314,17 @@ public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg, cmds.add(new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, blockPoolId, blks)); } - - // Check pending caching - List pendingCacheList = nodeinfo.getCacheBlocks(); - if (pendingCacheList != null) { - long blockIds[] = new long[pendingCacheList.size()]; - for (int i = 0; i < pendingCacheList.size(); i++) { - blockIds[i] = pendingCacheList.get(i).getBlockId(); - } - cmds.add(new BlockIdCommand(DatanodeProtocol.DNA_CACHE, blockPoolId, - blockIds)); + DatanodeCommand pendingCacheCommand = + getCacheCommand(nodeinfo.getPendingCached(), nodeinfo, + DatanodeProtocol.DNA_CACHE, blockPoolId); + if (pendingCacheCommand != null) { + cmds.add(pendingCacheCommand); } - // Check cached block invalidation - blks = nodeinfo.getInvalidateCacheBlocks(); - if (blks != null) { - long blockIds[] = new long[blks.length]; - for (int i = 0; i < blks.length; i++) { - blockIds[i] = blks[i].getBlockId(); - } - cmds.add(new BlockIdCommand(DatanodeProtocol.DNA_UNCACHE, - blockPoolId, blockIds)); + DatanodeCommand pendingUncacheCommand = + getCacheCommand(nodeinfo.getPendingUncached(), nodeinfo, + DatanodeProtocol.DNA_UNCACHE, blockPoolId); + if (pendingUncacheCommand != null) { + cmds.add(pendingUncacheCommand); } blockManager.addKeyUpdateCommand(cmds, nodeinfo); @@ -1345,6 +1345,40 @@ public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg, return new DatanodeCommand[0]; } + /** + * Convert a CachedBlockList into a DatanodeCommand with a list of blocks. + * + * @param list The {@link CachedBlocksList}. This function + * clears the list. + * @param datanode The datanode. + * @param action The action to perform in the command. + * @param poolId The block pool id. 
+ * @return A DatanodeCommand to be sent back to the DN, or null if + * there is nothing to be done. + */ + private DatanodeCommand getCacheCommand(CachedBlocksList list, + DatanodeDescriptor datanode, int action, String poolId) { + int length = list.size(); + if (length == 0) { + return null; + } + // Read and clear the existing cache commands. + long[] blockIds = new long[length]; + int i = 0; + for (Iterator iter = list.iterator(); + iter.hasNext(); ) { + CachedBlock cachedBlock = iter.next(); + blockIds[i++] = cachedBlock.getBlockId(); + iter.remove(); + } + if (!sendCachingCommands) { + // Do not send caching commands unless the FSNamesystem told us we + // should. + return null; + } + return new BlockIdCommand(action, poolId, blockIds); + } + /** * Tell all datanodes to use a new, non-persistent bandwidth value for * dfs.balance.bandwidthPerSec. @@ -1393,4 +1427,8 @@ public void clearPendingQueues() { public String toString() { return getClass().getSimpleName() + ": " + host2DatanodeMap; } + + public void setSendCachingCommands(boolean sendCachingCommands) { + this.sendCachingCommands = sendCachingCommands; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java index 4b4d38e7156..841ca41755f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; +import java.io.PrintWriter; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -34,22 +35,24 @@ * on the machine in question. */ @InterfaceAudience.Private -abstract class InvalidateBlocks { +class InvalidateBlocks { /** Mapping: StorageID -> Collection of Blocks */ private final Map> node2blocks = new TreeMap>(); /** The total number of blocks in the map. */ private long numBlocks = 0L; + private final DatanodeManager datanodeManager; + + InvalidateBlocks(final DatanodeManager datanodeManager) { + this.datanodeManager = datanodeManager; + } + /** @return the number of blocks to be invalidated . */ synchronized long numBlocks() { return numBlocks; } - synchronized int numStorages() { - return node2blocks.size(); - } - /** * @return true if the given storage has the given block listed for * invalidation. Blocks are compared including their generation stamps: @@ -108,22 +111,22 @@ synchronized void remove(final String storageID, final Block block) { } } - /** - * Polls up to limit blocks from the list of to-be-invalidated Blocks - * for a storage. - */ - synchronized List pollNumBlocks(final String storageId, final int limit) { - final LightWeightHashSet set = node2blocks.get(storageId); - if (set == null) { - return null; + /** Print the contents to out. 
*/ + synchronized void dump(final PrintWriter out) { + final int size = node2blocks.values().size(); + out.println("Metasave: Blocks " + numBlocks + + " waiting deletion from " + size + " datanodes."); + if (size == 0) { + return; } - List polledBlocks = set.pollN(limit); - // Remove the storage if the set is now empty - if (set.isEmpty()) { - remove(storageId); + + for(Map.Entry> entry : node2blocks.entrySet()) { + final LightWeightHashSet blocks = entry.getValue(); + if (blocks.size() > 0) { + out.println(datanodeManager.getDatanode(entry.getKey())); + out.println(blocks); + } } - numBlocks -= polledBlocks.size(); - return polledBlocks; } /** @return a list of the storage IDs. */ @@ -131,22 +134,26 @@ synchronized List getStorageIDs() { return new ArrayList(node2blocks.keySet()); } - /** - * Return the set of to-be-invalidated blocks for a storage. - */ - synchronized LightWeightHashSet getBlocks(String storageId) { - return node2blocks.get(storageId); - } + synchronized List invalidateWork( + final String storageId, final DatanodeDescriptor dn) { + final LightWeightHashSet set = node2blocks.get(storageId); + if (set == null) { + return null; + } - /** - * Schedules invalidation work associated with a storage at the corresponding - * datanode. - * @param storageId Storage of blocks to be invalidated - * @param dn Datanode where invalidation work will be scheduled - * @return List of blocks scheduled for invalidation at the datanode - */ - abstract List invalidateWork(final String storageId, - final DatanodeDescriptor dn); + // # blocks that can be sent in one message is limited + final int limit = datanodeManager.blockInvalidateLimit; + final List toInvalidate = set.pollN(limit); + + // If we send everything in this message, remove this node entry + if (set.isEmpty()) { + remove(storageId); + } + + dn.addBlocksToBeInvalidated(toInvalidate); + numBlocks -= toInvalidate.size(); + return toInvalidate; + } synchronized void clear() { node2blocks.clear(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateStoredBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateStoredBlocks.java deleted file mode 100644 index 23f3c68eea3..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateStoredBlocks.java +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
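The new getCacheCommand() above and the rewritten invalidateWork() here follow the same heartbeat-time pattern: drain a per-datanode work queue into one bounded command, and return null when there is nothing to send. The following standalone sketch is not part of the patch; the class and field names are invented, with sendCommands standing in for DatanodeManager#sendCachingCommands and the limit parameter playing the role of blockInvalidateLimit.

    import java.util.ArrayDeque;
    import java.util.Deque;

    /** Stand-in for a per-datanode work queue that is drained at heartbeat time. */
    class HeartbeatWorkQueue {
      private final Deque<Long> pendingBlockIds = new ArrayDeque<>();
      private boolean sendCommands = false; // mirrors DatanodeManager#sendCachingCommands

      synchronized void add(long blockId) {
        pendingBlockIds.add(blockId);
      }

      synchronized void setSendCommands(boolean send) {
        sendCommands = send;
      }

      /**
       * Drains up to limit block IDs into one command payload, or returns null
       * when there is nothing to be done (the same contract as getCacheCommand()).
       */
      synchronized long[] drain(int limit) {
        if (pendingBlockIds.isEmpty()) {
          return null;
        }
        int n = Math.min(limit, pendingBlockIds.size());
        long[] blockIds = new long[n];
        for (int i = 0; i < n; i++) {
          blockIds[i] = pendingBlockIds.poll();
        }
        // Like getCacheCommand(), the queue is consumed even if sending is
        // currently suppressed.
        return sendCommands ? blockIds : null;
      }

      public static void main(String[] args) {
        HeartbeatWorkQueue queue = new HeartbeatWorkQueue();
        queue.add(1001L);
        queue.add(1002L);
        queue.setSendCommands(true);
        long[] cmd = queue.drain(1000);
        System.out.println(cmd == null ? "nothing to do" : cmd.length + " block(s) in command");
      }
    }

As in getCacheCommand(), the pending list is consumed even while command sending is suppressed, so stale caching work does not accumulate before the FSNamesystem has told the DatanodeManager it may issue caching commands.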
- */ -package org.apache.hadoop.hdfs.server.blockmanagement; - -import java.io.PrintWriter; -import java.util.List; - -import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.util.LightWeightHashSet; - -/** - * Subclass of InvalidateBlocks used by the BlockManager to - * track blocks on each storage that are scheduled to be invalidated. - */ -public class InvalidateStoredBlocks extends InvalidateBlocks { - - private final DatanodeManager datanodeManager; - - InvalidateStoredBlocks(DatanodeManager datanodeManager) { - this.datanodeManager = datanodeManager; - } - - /** Print the contents to out. */ - synchronized void dump(final PrintWriter out) { - final int size = numStorages(); - out.println("Metasave: Blocks " + numBlocks() - + " waiting deletion from " + size + " datanodes."); - if (size == 0) { - return; - } - - List storageIds = getStorageIDs(); - for (String storageId: storageIds) { - LightWeightHashSet blocks = getBlocks(storageId); - if (blocks != null && !blocks.isEmpty()) { - out.println(datanodeManager.getDatanode(storageId)); - out.println(blocks); - } - } - } - - @Override - synchronized List invalidateWork( - final String storageId, final DatanodeDescriptor dn) { - final List toInvalidate = pollNumBlocks(storageId, - datanodeManager.blockInvalidateLimit); - if (toInvalidate != null) { - dn.addBlocksToBeInvalidated(toInvalidate); - } - return toInvalidate; - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java index 4f304a1846d..6b07b789341 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java @@ -29,27 +29,20 @@ import java.util.Map; import org.apache.commons.logging.Log; -import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.util.Daemon; -/** - * PendingReplicationBlocks is used in the BlockManager to track blocks that are - * currently being replicated on disk and in the CacheReplicationManager to - * track blocks that are currently being cached. - * - *


- * PendingReplicationBlocks performs the following tasks:
- *
- *   1. tracks in-flight replication or caching requests for a block at target
- * datanodes.
- *   2. identifies requests that have timed out and need to be rescheduled at a
- * different datanode.
- */ -@InterfaceAudience.LimitedPrivate({"HDFS"}) +/*************************************************** + * PendingReplicationBlocks does the bookkeeping of all + * blocks that are getting replicated. + * + * It does the following: + * 1) record blocks that are getting replicated at this instant. + * 2) a coarse grain timer to track age of replication request + * 3) a thread that periodically identifies replication-requests + * that never made it. + * + ***************************************************/ class PendingReplicationBlocks { private static final Log LOG = BlockManager.LOG; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReportProcessor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReportProcessor.java deleted file mode 100644 index c32e5d1dc25..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReportProcessor.java +++ /dev/null @@ -1,271 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.server.blockmanagement; - -import java.io.IOException; -import java.util.Collection; -import java.util.Iterator; -import java.util.LinkedList; - -import org.apache.commons.logging.Log; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; -import org.apache.hadoop.hdfs.server.namenode.NameNode; - -import com.google.common.base.Preconditions; - -/** - * Handles common operations of processing a block report from a datanode, - * generating a diff of updates to the BlocksMap, and then feeding the diff - * to the subclass-implemented hooks. - */ -@InterfaceAudience.LimitedPrivate({"HDFS"}) -public abstract class ReportProcessor { - - static final Log blockLog = NameNode.blockStateChangeLog; - private final String className = getClass().getSimpleName(); - // Max number of blocks to log info about during a block report. 
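The replacement block comment for PendingReplicationBlocks above describes a simple bookkeeping scheme: record each in-flight request with a coarse-grained timestamp, and let a periodic thread pick out the requests that never completed so they can be rescheduled. A minimal, dependency-free sketch of that scheme follows; the class and method names are invented for illustration and this is not the patch's implementation.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    /** Illustrative sketch of coarse-grained timeout tracking for in-flight requests. */
    class PendingRequestTracker {
      private final Map<Long, Long> requestTimeByBlockId = new HashMap<>();
      private final long timeoutMs;

      PendingRequestTracker(long timeoutMs) {
        this.timeoutMs = timeoutMs;
      }

      /** 1) record a request at this instant (coarse-grained timestamp). */
      synchronized void recordRequest(long blockId) {
        requestTimeByBlockId.put(blockId, System.currentTimeMillis());
      }

      /** Called when the datanode confirms the work finished. */
      synchronized void markCompleted(long blockId) {
        requestTimeByBlockId.remove(blockId);
      }

      /** 3) periodically called by a monitor thread to find requests that never made it. */
      synchronized List<Long> findTimedOut() {
        long now = System.currentTimeMillis();
        List<Long> timedOut = new ArrayList<>();
        for (Map.Entry<Long, Long> e : requestTimeByBlockId.entrySet()) {
          if (now - e.getValue() > timeoutMs) {
            timedOut.add(e.getKey());
          }
        }
        // Timed-out blocks are handed back so they can be rescheduled elsewhere.
        requestTimeByBlockId.keySet().removeAll(timedOut);
        return timedOut;
      }
    }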
- final long maxNumBlocksToLog; - - void blockLogDebug(String message) { - if (blockLog.isDebugEnabled()) { - blockLog.info("BLOCK* " + className + message); - } - } - - void blockLogInfo(String message) { - if (blockLog.isInfoEnabled()) { - blockLog.info("BLOCK* " + className + message); - } - } - - void blockLogWarn(String message) { - blockLog.warn("BLOCK* " + className + message); - } - - void logAddStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) { - if (!blockLog.isInfoEnabled()) { - return; - } - StringBuilder sb = new StringBuilder(500); - sb.append("BLOCK* " + className + "#addStoredBlock: blockMap updated: ") - .append(node) - .append(" is added to "); - storedBlock.appendStringTo(sb); - sb.append(" size " ) - .append(storedBlock.getNumBytes()); - blockLog.info(sb); - } - - public ReportProcessor(Configuration conf) { - this.maxNumBlocksToLog = conf.getLong( - DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY, - DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT); - } - - /** - * Processes a block report from a datanode, updating the block to - * datanode mapping, adding new blocks and removing invalid ones. - * Also computes and queues new replication and invalidation work. - * @param node Datanode sending the block report - * @param report as list of longs - * @throws IOException - */ - final void processReport(final DatanodeDescriptor node, - final BlockListAsLongs report) throws IOException { - // Normal case: - // Modify the (block-->datanode) map, according to the difference - // between the old and new block report. - // - Collection toAdd = new LinkedList(); - Collection toRemove = new LinkedList(); - Collection toInvalidate = new LinkedList(); - Collection toCorrupt = new LinkedList(); - Collection toUC = new LinkedList(); - reportDiff(node, report, toAdd, toRemove, toInvalidate, toCorrupt, toUC); - - // Process the blocks on each queue - for (StatefulBlockInfo b : toUC) { - addStoredBlockUnderConstruction(b.storedBlock, node, b.reportedState); - } - for (Block b : toRemove) { - removeStoredBlock(b, node); - } - int numBlocksLogged = 0; - for (BlockInfo b : toAdd) { - addStoredBlock(b, node, null, numBlocksLogged < maxNumBlocksToLog); - numBlocksLogged++; - } - - if (numBlocksLogged > maxNumBlocksToLog) { - blockLogInfo("#processReport: logged" - + " info for " + maxNumBlocksToLog - + " of " + numBlocksLogged + " reported."); - } - for (Block b : toInvalidate) { - blockLogInfo("#processReport: " - + b + " on " + node + " size " + b.getNumBytes() - + " does not belong to any file"); - addToInvalidates(b, node); - } - for (BlockToMarkCorrupt b : toCorrupt) { - markBlockAsCorrupt(b, node); - } - } - - /** - * Compute the difference between the current state of the datanode in the - * BlocksMap and the new reported state, categorizing changes into - * different groups (e.g. new blocks to be added, blocks that were removed, - * blocks that should be invalidated, etc.). 
- */ - private void reportDiff(DatanodeDescriptor dn, - BlockListAsLongs newReport, - Collection toAdd, // add to DatanodeDescriptor - Collection toRemove, // remove from DatanodeDescriptor - Collection toInvalidate, // should be removed from DN - Collection toCorrupt, // add to corrupt replicas list - Collection toUC) { // add to under-construction list - // place a delimiter in the list which separates blocks - // that have been reported from those that have not - BlockInfo delimiter = new BlockInfo(new Block(), 1); - boolean added = addBlock(dn, delimiter); - assert added : "Delimiting block cannot be present in the node"; - int headIndex = 0; //currently the delimiter is in the head of the list - int curIndex; - - if (newReport == null) { - newReport = new BlockListAsLongs(); - } - // scan the report and process newly reported blocks - BlockReportIterator itBR = newReport.getBlockReportIterator(); - while (itBR.hasNext()) { - Block iblk = itBR.next(); - ReplicaState iState = itBR.getCurrentReplicaState(); - BlockInfo storedBlock = processReportedBlock(dn, iblk, iState, - toAdd, toInvalidate, toCorrupt, toUC); - // move block to the head of the list - if (storedBlock != null && (curIndex = storedBlock.findDatanode(dn)) >= 0) { - headIndex = moveBlockToHead(dn, storedBlock, curIndex, headIndex); - } - } - // collect blocks that have not been reported - // all of them are next to the delimiter - Iterator it = new DatanodeDescriptor.BlockIterator( - delimiter.getNext(0), dn); - while (it.hasNext()) { - toRemove.add(it.next()); - } - removeBlock(dn, delimiter); - } - - // Operations on the blocks on a datanode - - abstract int moveBlockToHead(DatanodeDescriptor dn, BlockInfo storedBlock, - int curIndex, int headIndex); - - abstract boolean addBlock(DatanodeDescriptor dn, BlockInfo block); - - abstract boolean removeBlock(DatanodeDescriptor dn, BlockInfo block); - - // Cache report processing - - abstract BlockInfo processReportedBlock(DatanodeDescriptor dn, Block iblk, - ReplicaState iState, Collection toAdd, - Collection toInvalidate, Collection toCorrupt, - Collection toUC); - - // Hooks for processing the cache report diff - - abstract Block addStoredBlock(final BlockInfo block, - DatanodeDescriptor node, DatanodeDescriptor delNodeHint, - boolean logEveryBlock) throws IOException; - - abstract void removeStoredBlock(Block block, DatanodeDescriptor node); - - abstract void markBlockAsCorrupt(BlockToMarkCorrupt b, DatanodeInfo dn) - throws IOException; - - abstract void addToInvalidates(final Block b, final DatanodeInfo node); - - abstract void addStoredBlockUnderConstruction( - BlockInfoUnderConstruction storedBlock, DatanodeDescriptor node, - ReplicaState reportedState) throws IOException; - - /** - * BlockToMarkCorrupt is used to build the "toCorrupt" list, which is a - * list of blocks that should be considered corrupt due to a block report. - */ - static class BlockToMarkCorrupt { - /** The corrupted block in a datanode. */ - final BlockInfo corrupted; - /** The corresponding block stored in the BlockManager. */ - final BlockInfo stored; - /** The reason to mark corrupt. 
*/ - final String reason; - - BlockToMarkCorrupt(BlockInfo corrupted, BlockInfo stored, String reason) { - Preconditions.checkNotNull(corrupted, "corrupted is null"); - Preconditions.checkNotNull(stored, "stored is null"); - - this.corrupted = corrupted; - this.stored = stored; - this.reason = reason; - } - - BlockToMarkCorrupt(BlockInfo stored, String reason) { - this(stored, stored, reason); - } - - BlockToMarkCorrupt(BlockInfo stored, long gs, String reason) { - this(new BlockInfo(stored), stored, reason); - //the corrupted block in datanode has a different generation stamp - corrupted.setGenerationStamp(gs); - } - - @Override - public String toString() { - return corrupted + "(" - + (corrupted == stored? "same as stored": "stored=" + stored) + ")"; - } - } - - /** - * StatefulBlockInfo is used to build the "toUC" list, which is a list of - * updates to the information about under-construction blocks. - * Besides the block in question, it provides the ReplicaState - * reported by the datanode in the block report. - */ - static class StatefulBlockInfo { - final BlockInfoUnderConstruction storedBlock; - final ReplicaState reportedState; - - StatefulBlockInfo(BlockInfoUnderConstruction storedBlock, - ReplicaState reportedState) { - this.storedBlock = storedBlock; - this.reportedState = reportedState; - } - } - -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UncacheBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UncacheBlocks.java deleted file mode 100644 index 855b73feb96..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UncacheBlocks.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.server.blockmanagement; - -import java.util.List; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.protocol.Block; - -/** - * Subclass of InvalidateBlocks used by the CacheReplicationManager to - * track blocks on each storage that are scheduled to be uncached. 
- */ -@InterfaceAudience.Private -public class UncacheBlocks extends InvalidateBlocks { - - UncacheBlocks() { - } - - @Override - synchronized List invalidateWork( - final String storageId, final DatanodeDescriptor dn) { - final List toInvalidate = pollNumBlocks(storageId, Integer.MAX_VALUE); - if (toInvalidate != null) { - dn.addBlocksToBeUncached(toInvalidate); - } - return toInvalidate; - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java index ce934764673..7fca64b39e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java @@ -628,6 +628,8 @@ private boolean processCommandFromStandby(DatanodeCommand cmd, case DatanodeProtocol.DNA_FINALIZE: case DatanodeProtocol.DNA_RECOVERBLOCK: case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE: + case DatanodeProtocol.DNA_CACHE: + case DatanodeProtocol.DNA_UNCACHE: LOG.warn("Got a command from standby NN - ignoring command:" + cmd.getAction()); break; default: diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java index 4bd1cf5039c..dd05c18bfc8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java @@ -21,24 +21,10 @@ import java.io.FileInputStream; import java.io.IOException; import java.util.ArrayList; -import java.util.HashMap; import java.util.Iterator; import java.util.List; -import java.util.Map; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletionService; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executor; -import java.util.concurrent.ExecutorCompletionService; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.io.IOUtils; @@ -50,10 +36,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; -import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.server.datanode.DataNode; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; /** * Manages caching for an FsDatasetImpl by using the mmap(2) and mlock(2) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java index 739a98f7fc0..39a0aab2d61 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java @@ -17,53 +17,97 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT; +import java.io.Closeable; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.Iterator; +import java.util.LinkedList; import java.util.List; import java.util.Map.Entry; import java.util.SortedMap; import java.util.TreeMap; +import org.apache.commons.io.IOUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError; -import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PoolWritePermissionDeniedError; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; +import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; +import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.InvalidPoolNameError; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.UnexpectedAddPathBasedCacheDirectiveException; +import org.apache.hadoop.hdfs.protocol.AddPathBasedCacheDirectiveException.PoolWritePermissionDeniedError; import org.apache.hadoop.hdfs.protocol.PathBasedCacheEntry; import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.InvalidIdException; import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.NoSuchIdException; -import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.RemovePermissionDeniedException; import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.UnexpectedRemovePathBasedCacheDescriptorException; +import 
org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.RemovePermissionDeniedException; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; +import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type; +import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; +import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType; import org.apache.hadoop.io.Text; +import org.apache.hadoop.util.GSet; +import org.apache.hadoop.util.LightWeightGSet; +import org.apache.hadoop.util.Time; -import com.google.common.base.Preconditions; +import com.google.common.annotations.VisibleForTesting; /** * The Cache Manager handles caching on DataNodes. + * + * This class is instantiated by the FSNamesystem when caching is enabled. + * It maintains the mapping of cached blocks to datanodes via processing + * datanode cache reports. Based on these reports and addition and removal of + * caching directives, we will schedule caching and uncaching work. */ +@InterfaceAudience.LimitedPrivate({"HDFS"}) public final class CacheManager { public static final Log LOG = LogFactory.getLog(CacheManager.class); + // TODO: add pending / underCached / schedule cached blocks stats. + + /** + * The FSNamesystem that contains this CacheManager. + */ + private final FSNamesystem namesystem; + + /** + * The BlockManager associated with the FSN that owns this CacheManager. + */ + private final BlockManager blockManager; + /** * Cache entries, sorted by ID. * @@ -73,6 +117,12 @@ public final class CacheManager { private final TreeMap entriesById = new TreeMap(); + /** + * The entry ID to use for a new entry. Entry IDs always increase, and are + * never reused. + */ + private long nextEntryId; + /** * Cache entries, sorted by path */ @@ -85,11 +135,6 @@ public final class CacheManager { private final TreeMap cachePools = new TreeMap(); - /** - * The entry ID to use for a new entry. - */ - private long nextEntryId; - /** * Maximum number of cache pools to list in one operation. */ @@ -100,44 +145,129 @@ public final class CacheManager { */ private final int maxListCacheDescriptorsResponses; - final private FSNamesystem namesystem; - final private FSDirectory dir; + /** + * Interval between scans in milliseconds. + */ + private final long scanIntervalMs; - CacheManager(FSNamesystem namesystem, FSDirectory dir, Configuration conf) { - clear(); + /** + * Whether caching is enabled. + * + * If caching is disabled, we will not process cache reports or store + * information about what is cached where. We also do not start the + * CacheReplicationMonitor thread. This will save resources, but provide + * less functionality. + * + * Even when caching is disabled, we still store path-based cache + * information. This information is stored in the edit log and fsimage. 
We + * don't want to lose it just because a configuration setting was turned off. + * However, we will not act on this information if caching is disabled. + */ + private final boolean enabled; + + /** + * Whether the CacheManager is active. + * + * When the CacheManager is active, it tells the DataNodes what to cache + * and uncache. The CacheManager cannot become active if enabled = false. + */ + private boolean active = false; + + /** + * All cached blocks. + */ + private final GSet cachedBlocks; + + /** + * The CacheReplicationMonitor. + */ + private CacheReplicationMonitor monitor; + + CacheManager(FSNamesystem namesystem, Configuration conf, + BlockManager blockManager) { this.namesystem = namesystem; - this.dir = dir; - maxListCachePoolsResponses = conf.getInt( + this.blockManager = blockManager; + this.nextEntryId = 1; + this.maxListCachePoolsResponses = conf.getInt( DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT); - maxListCacheDescriptorsResponses = conf.getInt( + this.maxListCacheDescriptorsResponses = conf.getInt( DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES, DFS_NAMENODE_LIST_CACHE_DESCRIPTORS_NUM_RESPONSES_DEFAULT); - } - - synchronized void clear() { - entriesById.clear(); - entriesByPath.clear(); - cachePools.clear(); - nextEntryId = 1; + scanIntervalMs = conf.getLong( + DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, + DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT); + this.enabled = conf.getBoolean(DFS_NAMENODE_CACHING_ENABLED_KEY, + DFS_NAMENODE_CACHING_ENABLED_DEFAULT); + this.cachedBlocks = !enabled ? null : + new LightWeightGSet( + LightWeightGSet.computeCapacity(0.25, "cachedBlocks")); } /** - * Returns the next entry ID to be used for a PathBasedCacheEntry + * Activate the cache manager. + * + * When the cache manager is active, tell the datanodes where to cache files. */ - synchronized long getNextEntryId() { - Preconditions.checkArgument(nextEntryId != Long.MAX_VALUE); + public void activate() { + assert namesystem.hasWriteLock(); + if (enabled && (!active)) { + LOG.info("Activating CacheManager. " + + "Starting replication monitor thread..."); + active = true; + monitor = new CacheReplicationMonitor(namesystem, this, + scanIntervalMs); + monitor.start(); + } + } + + /** + * Deactivate the cache manager. + * + * When the cache manager is inactive, it does not tell the datanodes where to + * cache files. + */ + public void deactivate() { + assert namesystem.hasWriteLock(); + if (active) { + LOG.info("Deactivating CacheManager. " + + "stopping CacheReplicationMonitor thread..."); + active = false; + IOUtils.closeQuietly(monitor); + monitor = null; + LOG.info("CacheReplicationMonitor thread stopped and deactivated."); + } + } + + /** + * Return true only if the cache manager is active. + * Must be called under the FSN read or write lock. + */ + public boolean isActive() { + return active; + } + + public TreeMap getEntriesById() { + assert namesystem.hasReadOrWriteLock(); + return entriesById; + } + + @VisibleForTesting + public GSet getCachedBlocks() { + assert namesystem.hasReadOrWriteLock(); + return cachedBlocks; + } + + private long getNextEntryId() throws IOException { + assert namesystem.hasWriteLock(); + if (nextEntryId == Long.MAX_VALUE) { + throw new IOException("No more available IDs"); + } return nextEntryId++; } - /** - * Returns the PathBasedCacheEntry corresponding to a PathBasedCacheEntry. 
- * - * @param directive Lookup directive - * @return Corresponding PathBasedCacheEntry, or null if not present. - */ - private synchronized PathBasedCacheEntry - findEntry(PathBasedCacheDirective directive) { + private PathBasedCacheEntry findEntry(PathBasedCacheDirective directive) { + assert namesystem.hasReadOrWriteLock(); List existing = entriesByPath.get(directive.getPath().toUri().getPath()); if (existing == null) { @@ -151,56 +281,10 @@ synchronized long getNextEntryId() { return null; } - /** - * Add a new PathBasedCacheEntry, skipping any validation checks. Called - * directly when reloading CacheManager state from FSImage. - * - * @throws IOException if unable to cache the entry - */ - private void unprotectedAddEntry(PathBasedCacheEntry entry) - throws IOException { - assert namesystem.hasWriteLock(); - // Add it to the various maps - entriesById.put(entry.getEntryId(), entry); - String path = entry.getPath(); - List entryList = entriesByPath.get(path); - if (entryList == null) { - entryList = new ArrayList(1); - entriesByPath.put(path, entryList); - } - entryList.add(entry); - // Set the path as cached in the namesystem - try { - INode node = dir.getINode(entry.getPath()); - if (node != null && node.isFile()) { - INodeFile file = node.asFile(); - // TODO: adjustable cache replication factor - namesystem.setCacheReplicationInt(entry.getPath(), - file.getBlockReplication()); - } else { - LOG.warn("Path " + entry.getPath() + " is not a file"); - } - } catch (IOException ioe) { - LOG.info("unprotectedAddEntry " + entry +": failed to cache file: " + - ioe.getClass().getName() +": " + ioe.getMessage()); - throw ioe; - } - } - - /** - * Add a new PathBasedCacheDirective if valid, returning a corresponding - * PathBasedCacheDescriptor to the user. - * - * @param directive Directive describing the cache entry being added - * @param pc Permission checker used to validate that the calling user has - * access to the destination cache pool - * @return Corresponding PathBasedCacheDescriptor for the new cache entry - * @throws IOException if the directive is invalid or was otherwise - * unsuccessful - */ - public synchronized PathBasedCacheDescriptor addDirective( + public PathBasedCacheDescriptor addDirective( PathBasedCacheDirective directive, FSPermissionChecker pc) throws IOException { + assert namesystem.hasWriteLock(); CachePool pool = cachePools.get(directive.getPool()); if (pool == null) { LOG.info("addDirective " + directive + ": pool not found."); @@ -225,47 +309,37 @@ public synchronized PathBasedCacheDescriptor addDirective( "existing directive " + existing + " in this pool."); return existing.getDescriptor(); } - - // Success! - PathBasedCacheDescriptor d = unprotectedAddDirective(directive); - LOG.info("addDirective " + directive + ": added cache directive " - + directive); - return d; - } - - /** - * Assigns a new entry ID to a validated PathBasedCacheDirective and adds - * it to the CacheManager. Called directly when replaying the edit log. - * - * @param directive Directive being added - * @return PathBasedCacheDescriptor for the directive - * @throws IOException - */ - PathBasedCacheDescriptor unprotectedAddDirective( - PathBasedCacheDirective directive) throws IOException { - assert namesystem.hasWriteLock(); - CachePool pool = cachePools.get(directive.getPool()); // Add a new entry with the next available ID. 
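The activate()/deactivate() pair introduced above gates all datanode-facing caching work on two conditions: the static enabled flag (dfs.namenode.caching.enabled) and the NameNode's HA state. The sketch below is a standalone illustration of that gating, not the patch's code; the names are invented, and the Closeable field stands in for the CacheReplicationMonitor that the real activate() starts.

    import java.io.Closeable;
    import java.io.IOException;

    /** Illustrative sketch: a monitor runs only while caching is enabled AND the NN is active. */
    class CachingLifecycle {
      private final boolean enabled;   // dfs.namenode.caching.enabled at startup
      private boolean active = false;
      private Closeable monitor;       // placeholder for the rescan-monitor resource

      CachingLifecycle(boolean enabled) {
        this.enabled = enabled;
      }

      synchronized void activate() {
        if (enabled && !active) {
          active = true;
          monitor = () -> System.out.println("monitor stopped"); // stand-in resource
          System.out.println("caching monitor started");
        }
      }

      synchronized void deactivate() throws IOException {
        if (active) {
          active = false;
          if (monitor != null) {
            monitor.close();
            monitor = null;
          }
        }
      }

      synchronized boolean isActive() {
        return active;
      }
    }

Later in this patch, FSNamesystem#startActiveServices() pairs cacheManager.activate() with DatanodeManager#setSendCachingCommands(true), and stopActiveServices()/stopCommonServices() reverse both, so a standby NameNode never tells datanodes what to cache.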
PathBasedCacheEntry entry; - entry = new PathBasedCacheEntry(getNextEntryId(), - directive.getPath().toUri().getPath(), - directive.getReplication(), pool); - - unprotectedAddEntry(entry); + try { + entry = new PathBasedCacheEntry(getNextEntryId(), + directive.getPath().toUri().getPath(), + directive.getReplication(), pool); + } catch (IOException ioe) { + throw new UnexpectedAddPathBasedCacheDirectiveException(directive); + } + LOG.info("addDirective " + directive + ": added cache directive " + + directive); + // Success! + // First, add it to the various maps + entriesById.put(entry.getEntryId(), entry); + String path = directive.getPath().toUri().getPath(); + List entryList = entriesByPath.get(path); + if (entryList == null) { + entryList = new ArrayList(1); + entriesByPath.put(path, entryList); + } + entryList.add(entry); + if (monitor != null) { + monitor.kick(); + } return entry.getDescriptor(); } - /** - * Remove the PathBasedCacheEntry corresponding to a descriptor ID from - * the CacheManager. - * - * @param id of the PathBasedCacheDescriptor - * @param pc Permissions checker used to validated the request - * @throws IOException - */ - public synchronized void removeDescriptor(long id, FSPermissionChecker pc) + public void removeDescriptor(long id, FSPermissionChecker pc) throws IOException { + assert namesystem.hasWriteLock(); // Check for invalid IDs. if (id <= 0) { LOG.info("removeDescriptor " + id + ": invalid non-positive " + @@ -290,20 +364,6 @@ public synchronized void removeDescriptor(long id, FSPermissionChecker pc) throw new RemovePermissionDeniedException(id); } - unprotectedRemoveDescriptor(id); - } - - /** - * Unchecked internal method used to remove a PathBasedCacheEntry from the - * CacheManager. Called directly when replaying the edit log. - * - * @param id of the PathBasedCacheDescriptor corresponding to the entry that - * is being removed - * @throws IOException - */ - void unprotectedRemoveDescriptor(long id) throws IOException { - assert namesystem.hasWriteLock(); - PathBasedCacheEntry existing = entriesById.get(id); // Remove the corresponding entry in entriesByPath. String path = existing.getDescriptor().getPath().toUri().getPath(); List entries = entriesByPath.get(path); @@ -314,26 +374,16 @@ void unprotectedRemoveDescriptor(long id) throws IOException { entriesByPath.remove(path); } entriesById.remove(id); - - // Set the path as uncached in the namesystem - try { - INode node = dir.getINode(existing.getDescriptor().getPath().toUri(). - getPath()); - if (node != null && node.isFile()) { - namesystem.setCacheReplicationInt(existing.getDescriptor().getPath(). - toUri().getPath(), (short) 0); - } - } catch (IOException e) { - LOG.warn("removeDescriptor " + id + ": failure while setting cache" - + " replication factor", e); - throw e; + if (monitor != null) { + monitor.kick(); } LOG.info("removeDescriptor successful for PathCacheEntry id " + id); } - public synchronized BatchedListEntries + public BatchedListEntries listPathBasedCacheDescriptors(long prevId, String filterPool, String filterPath, FSPermissionChecker pc) throws IOException { + assert namesystem.hasReadOrWriteLock(); final int NUM_PRE_ALLOCATED_ENTRIES = 16; if (filterPath != null) { if (!DFSUtil.isValidName(filterPath)) { @@ -370,12 +420,13 @@ void unprotectedRemoveDescriptor(long id) throws IOException { * Create a cache pool. * * Only the superuser should be able to call this function. - * - * @param info The info for the cache pool to create. 
- * @return the created CachePool + * + * @param info The info for the cache pool to create. + * @return Information about the cache pool we created. */ - public synchronized CachePoolInfo addCachePool(CachePoolInfo info) + public CachePoolInfo addCachePool(CachePoolInfo info) throws IOException { + assert namesystem.hasWriteLock(); CachePoolInfo.validate(info); String poolName = info.getPoolName(); CachePool pool = cachePools.get(poolName); @@ -384,20 +435,8 @@ public synchronized CachePoolInfo addCachePool(CachePoolInfo info) } pool = CachePool.createFromInfoAndDefaults(info); cachePools.put(pool.getPoolName(), pool); - return pool.getInfo(true); - } - - /** - * Internal unchecked method used to add a CachePool. Called directly when - * reloading CacheManager state from the FSImage or edit log. - * - * @param pool to be added - */ - void unprotectedAddCachePool(CachePoolInfo info) { - assert namesystem.hasWriteLock(); - CachePool pool = CachePool.createFromInfo(info); - cachePools.put(pool.getPoolName(), pool); LOG.info("created new cache pool " + pool); + return pool.getInfo(true); } /** @@ -408,8 +447,9 @@ void unprotectedAddCachePool(CachePoolInfo info) { * @param info * The info for the cache pool to modify. */ - public synchronized void modifyCachePool(CachePoolInfo info) + public void modifyCachePool(CachePoolInfo info) throws IOException { + assert namesystem.hasWriteLock(); CachePoolInfo.validate(info); String poolName = info.getPoolName(); CachePool pool = cachePools.get(poolName); @@ -455,8 +495,9 @@ public synchronized void modifyCachePool(CachePoolInfo info) * @param poolName * The name for the cache pool to remove. */ - public synchronized void removeCachePool(String poolName) + public void removeCachePool(String poolName) throws IOException { + assert namesystem.hasWriteLock(); CachePoolInfo.validateName(poolName); CachePool pool = cachePools.remove(poolName); if (pool == null) { @@ -475,10 +516,14 @@ public synchronized void removeCachePool(String poolName) iter.remove(); } } + if (monitor != null) { + monitor.kick(); + } } - public synchronized BatchedListEntries + public BatchedListEntries listCachePools(FSPermissionChecker pc, String prevKey) { + assert namesystem.hasReadOrWriteLock(); final int NUM_PRE_ALLOCATED_ENTRIES = 16; ArrayList results = new ArrayList(NUM_PRE_ALLOCATED_ENTRIES); @@ -497,9 +542,104 @@ public synchronized void removeCachePool(String poolName) return new BatchedListEntries(results, false); } - /* - * FSImage related serialization and deserialization code - */ + public void setCachedLocations(LocatedBlock block) { + if (!enabled) { + return; + } + CachedBlock cachedBlock = + new CachedBlock(block.getBlock().getBlockId(), + (short)0, false); + cachedBlock = cachedBlocks.get(cachedBlock); + if (cachedBlock == null) { + return; + } + List datanodes = cachedBlock.getDatanodes(Type.CACHED); + for (DatanodeDescriptor datanode : datanodes) { + block.addCachedLoc(datanode); + } + } + + public final void processCacheReport(final DatanodeID datanodeID, + final BlockListAsLongs report) throws IOException { + if (!enabled) { + LOG.info("Ignoring cache report from " + datanodeID + + " because " + DFS_NAMENODE_CACHING_ENABLED_KEY + " = false. 
" + + "number of blocks: " + report.getNumberOfBlocks()); + return; + } + namesystem.writeLock(); + final long startTime = Time.monotonicNow(); + final long endTime; + try { + final DatanodeDescriptor datanode = + blockManager.getDatanodeManager().getDatanode(datanodeID); + if (datanode == null || !datanode.isAlive) { + throw new IOException( + "processCacheReport from dead or unregistered datanode: " + datanode); + } + processCacheReportImpl(datanode, report); + } finally { + endTime = Time.monotonicNow(); + namesystem.writeUnlock(); + } + + // Log the block report processing stats from Namenode perspective + final NameNodeMetrics metrics = NameNode.getNameNodeMetrics(); + if (metrics != null) { + metrics.addCacheBlockReport((int) (endTime - startTime)); + } + LOG.info("Processed cache report from " + + datanodeID + ", blocks: " + report.getNumberOfBlocks() + + ", processing time: " + (endTime - startTime) + " msecs"); + } + + private void processCacheReportImpl(final DatanodeDescriptor datanode, + final BlockListAsLongs report) { + CachedBlocksList cached = datanode.getCached(); + cached.clear(); + BlockReportIterator itBR = report.getBlockReportIterator(); + while (itBR.hasNext()) { + Block block = itBR.next(); + ReplicaState iState = itBR.getCurrentReplicaState(); + if (iState != ReplicaState.FINALIZED) { + LOG.error("Cached block report contained unfinalized block " + block); + continue; + } + BlockInfo blockInfo = blockManager.getStoredBlock(block); + if (blockInfo.getGenerationStamp() < block.getGenerationStamp()) { + // The NameNode will eventually remove or update the out-of-date block. + // Until then, we pretend that it isn't cached. + LOG.warn("Genstamp in cache report disagrees with our genstamp for " + + block + ": expected genstamp " + blockInfo.getGenerationStamp()); + continue; + } + Collection corruptReplicas = + blockManager.getCorruptReplicas(blockInfo); + if ((corruptReplicas != null) && corruptReplicas.contains(datanode)) { + // The NameNode will eventually remove or update the corrupt block. + // Until then, we pretend that it isn't cached. + LOG.warn("Ignoring cached replica on " + datanode + " of " + block + + " because it is corrupt."); + continue; + } + CachedBlock cachedBlock = + new CachedBlock(block.getBlockId(), (short)0, false); + CachedBlock prevCachedBlock = cachedBlocks.get(cachedBlock); + // Use the existing CachedBlock if it's present; otherwise, + // insert a new one. + if (prevCachedBlock != null) { + cachedBlock = prevCachedBlock; + } else { + cachedBlocks.put(cachedBlock); + } + if (!cachedBlock.isPresent(datanode.getCached())) { + datanode.getCached().add(cachedBlock); + } + if (cachedBlock.isPresent(datanode.getPendingCached())) { + datanode.getPendingCached().remove(cachedBlock); + } + } + } /** * Saves the current state of the CacheManager to the DataOutput. 
Used @@ -508,7 +648,7 @@ public synchronized void removeCachePool(String poolName) * @param sdPath path of the storage directory * @throws IOException */ - public synchronized void saveState(DataOutput out, String sdPath) + public void saveState(DataOutput out, String sdPath) throws IOException { out.writeLong(nextEntryId); savePools(out, sdPath); @@ -521,7 +661,8 @@ public synchronized void saveState(DataOutput out, String sdPath) * @param in DataInput from which to restore state * @throws IOException */ - public synchronized void loadState(DataInput in) throws IOException { + public void loadState(DataInput in) throws IOException { + assert namesystem.hasWriteLock(); nextEntryId = in.readLong(); // pools need to be loaded first since entries point to their parent pool loadPools(in); @@ -531,7 +672,7 @@ public synchronized void loadState(DataInput in) throws IOException { /** * Save cache pools to fsimage */ - private synchronized void savePools(DataOutput out, + private void savePools(DataOutput out, String sdPath) throws IOException { StartupProgress prog = NameNode.getStartupProgress(); Step step = new Step(StepType.CACHE_POOLS, sdPath); @@ -549,7 +690,7 @@ private synchronized void savePools(DataOutput out, /* * Save cache entries to fsimage */ - private synchronized void saveEntries(DataOutput out, String sdPath) + private void saveEntries(DataOutput out, String sdPath) throws IOException { StartupProgress prog = NameNode.getStartupProgress(); Step step = new Step(StepType.CACHE_ENTRIES, sdPath); @@ -560,6 +701,7 @@ private synchronized void saveEntries(DataOutput out, String sdPath) for (PathBasedCacheEntry entry: entriesById.values()) { out.writeLong(entry.getEntryId()); Text.writeString(out, entry.getPath()); + out.writeShort(entry.getReplication()); Text.writeString(out, entry.getPool().getPoolName()); counter.increment(); } @@ -569,7 +711,7 @@ private synchronized void saveEntries(DataOutput out, String sdPath) /** * Load cache pools from fsimage */ - private synchronized void loadPools(DataInput in) + private void loadPools(DataInput in) throws IOException { StartupProgress prog = NameNode.getStartupProgress(); Step step = new Step(StepType.CACHE_POOLS); @@ -578,8 +720,7 @@ private synchronized void loadPools(DataInput in) prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfPools); Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step); for (int i = 0; i < numberOfPools; i++) { - CachePoolInfo info = CachePoolInfo.readFrom(in); - unprotectedAddCachePool(info); + addCachePool(CachePoolInfo.readFrom(in)); counter.increment(); } prog.endStep(Phase.LOADING_FSIMAGE, step); @@ -588,7 +729,7 @@ private synchronized void loadPools(DataInput in) /** * Load cache entries from the fsimage */ - private synchronized void loadEntries(DataInput in) throws IOException { + private void loadEntries(DataInput in) throws IOException { StartupProgress prog = NameNode.getStartupProgress(); Step step = new Step(StepType.CACHE_ENTRIES); prog.beginStep(Phase.LOADING_FSIMAGE, step); @@ -602,12 +743,24 @@ private synchronized void loadEntries(DataInput in) throws IOException { String poolName = Text.readString(in); // Get pool reference by looking it up in the map CachePool pool = cachePools.get(poolName); + if (pool != null) { + throw new IOException("Entry refers to pool " + poolName + + ", which does not exist."); + } PathBasedCacheEntry entry = - new PathBasedCacheEntry(entryId, path, replication, pool); - unprotectedAddEntry(entry); + new PathBasedCacheEntry(entryId, path, replication, pool); 
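For reference, saveEntries() above writes one record per PathBasedCacheEntry as (entry ID, path, replication, pool name), the replication short being the field added by this patch, and loadEntries() reconstructs entries from the same fields. The sketch below only illustrates that record layout; it uses writeUTF/readUTF instead of org.apache.hadoop.io.Text.writeString, so the byte format differs from the real fsimage, and the values are made up.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    /** Round-trips one cache entry record using the field order saveEntries() writes. */
    public class CacheEntryRecordDemo {
      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);

        // Field order mirrors saveEntries(): id, path, replication (new), pool name.
        out.writeLong(42L);             // entry ID
        out.writeUTF("/warm/reports");  // path (real code uses Text.writeString)
        out.writeShort((short) 3);      // cache replication
        out.writeUTF("analytics");      // pool name
        out.flush();

        DataInputStream in = new DataInputStream(
            new ByteArrayInputStream(bytes.toByteArray()));
        long id = in.readLong();
        String path = in.readUTF();
        short replication = in.readShort();
        String pool = in.readUTF();
        System.out.println(id + " " + path + " r=" + replication + " pool=" + pool);
      }
    }

Incidentally, the guard just above reads "if (pool != null)" while its exception message says the pool "does not exist"; the message suggests the intended condition is pool == null.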
+ if (entriesById.put(entry.getEntryId(), entry) != null) { + throw new IOException("An entry with ID " + entry.getEntryId() + + " already exists"); + } + List entries = entriesByPath.get(entry.getPath()); + if (entries == null) { + entries = new LinkedList(); + entriesByPath.put(entry.getPath(), entries); + } + entries.add(entry); counter.increment(); } prog.endStep(Phase.LOADING_FSIMAGE, step); } - } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index ed48a28d62d..074a29743e4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException; @@ -1092,52 +1093,6 @@ Block[] unprotectedSetReplication(String src, short replication, return file.getBlocks(); } - /** - * Set cache replication for a file - * - * @param src file name - * @param replication new replication - * @param blockRepls block replications - output parameter - * @return array of file blocks - * @throws QuotaExceededException - * @throws SnapshotAccessControlException - */ - Block[] setCacheReplication(String src, short replication, short[] blockRepls) - throws QuotaExceededException, UnresolvedLinkException, - SnapshotAccessControlException { - waitForReady(); - writeLock(); - try { - return unprotectedSetCacheReplication(src, replication, blockRepls); - } finally { - writeUnlock(); - } - } - - Block[] unprotectedSetCacheReplication(String src, short replication, - short[] blockRepls) throws QuotaExceededException, - UnresolvedLinkException, SnapshotAccessControlException { - assert hasWriteLock(); - - final INodesInPath iip = rootDir.getINodesInPath4Write(src, true); - final INode inode = iip.getLastINode(); - if (inode == null || !inode.isFile()) { - return null; - } - INodeFile file = inode.asFile(); - final short oldBR = file.getCacheReplication(); - - // TODO: Update quotas here as repl goes up or down - file.setCacheReplication(replication); - final short newBR = file.getCacheReplication(); - - if (blockRepls != null) { - blockRepls[0] = oldBR; - blockRepls[1] = newBR; - } - return file.getBlocks(); - } - /** * @param path the file path * @return the block size of the file. @@ -2638,12 +2593,19 @@ private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path, int childrenNum = node.isDirectory() ? node.asDirectory().getChildrenNum(snapshot) : 0; - return new HdfsLocatedFileStatus(size, node.isDirectory(), replication, - blocksize, node.getModificationTime(snapshot), - node.getAccessTime(snapshot), node.getFsPermission(snapshot), - node.getUserName(snapshot), node.getGroupName(snapshot), - node.isSymlink() ? 
node.asSymlink().getSymlink() : null, path, - node.getId(), loc, childrenNum); + HdfsLocatedFileStatus status = + new HdfsLocatedFileStatus(size, node.isDirectory(), replication, + blocksize, node.getModificationTime(snapshot), + node.getAccessTime(snapshot), node.getFsPermission(snapshot), + node.getUserName(snapshot), node.getGroupName(snapshot), + node.isSymlink() ? node.asSymlink().getSymlink() : null, path, + node.getId(), loc, childrenNum); + // Set caching information for the located blocks. + CacheManager cacheManager = namesystem.getCacheManager(); + for (LocatedBlock lb: loc.getLocatedBlocks()) { + cacheManager.setCachedLocations(lb); + } + return status; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 09d363ce68e..b39e19c0c9c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -648,8 +648,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, setPool(addOp.pool). build(); PathBasedCacheDescriptor descriptor = - fsNamesys.getCacheManager().unprotectedAddDirective(d); - + fsNamesys.getCacheManager().addDirective(d, null); if (toAddRetryCache) { fsNamesys.addCacheEntryWithPayload(op.rpcClientId, op.rpcCallId, descriptor); @@ -659,8 +658,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, case OP_REMOVE_PATH_BASED_CACHE_DESCRIPTOR: { RemovePathBasedCacheDescriptorOp removeOp = (RemovePathBasedCacheDescriptorOp) op; - fsNamesys.getCacheManager().unprotectedRemoveDescriptor(removeOp.id); - + fsNamesys.getCacheManager().removeDescriptor(removeOp.id, null); if (toAddRetryCache) { fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId); } @@ -668,8 +666,7 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, } case OP_ADD_CACHE_POOL: { AddCachePoolOp addOp = (AddCachePoolOp) op; - fsNamesys.getCacheManager().unprotectedAddCachePool(addOp.info); - + fsNamesys.getCacheManager().addCachePool(addOp.info); if (toAddRetryCache) { fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId); } @@ -678,7 +675,6 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, case OP_MODIFY_CACHE_POOL: { ModifyCachePoolOp modifyOp = (ModifyCachePoolOp) op; fsNamesys.getCacheManager().modifyCachePool(modifyOp.info); - if (toAddRetryCache) { fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId); } @@ -687,7 +683,6 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, case OP_REMOVE_CACHE_POOL: { RemoveCachePoolOp removeOp = (RemoveCachePoolOp) op; fsNamesys.getCacheManager().removeCachePool(removeOp.poolName); - if (toAddRetryCache) { fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 8d37918d01a..06db72e2b3d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -374,7 +374,6 @@ private void logAuditEvent(boolean succeeded, private final BlockManager blockManager; private final SnapshotManager 
snapshotManager; private final CacheManager cacheManager; - private final CacheReplicationManager cacheReplicationManager; private final DatanodeStatistics datanodeStatistics; // Block pool ID used by this namenode @@ -702,9 +701,12 @@ public static FSNamesystem loadFromDisk(Configuration conf) this.dtSecretManager = createDelegationTokenSecretManager(conf); this.dir = new FSDirectory(fsImage, this, conf); this.snapshotManager = new SnapshotManager(dir); - this.cacheManager = new CacheManager(this, dir, conf); - this.cacheReplicationManager = new CacheReplicationManager(this, - blockManager, blockManager.getDatanodeManager(), this, conf); + writeLock(); + try { + this.cacheManager = new CacheManager(this, conf, blockManager); + } finally { + writeUnlock(); + } this.safeMode = new SafeModeInfo(conf); this.auditLoggers = initAuditLoggers(conf); this.isDefaultAuditLogger = auditLoggers.size() == 1 && @@ -881,7 +883,6 @@ void startCommonServices(Configuration conf, HAContext haContext) throws IOExcep getCompleteBlocksTotal()); setBlockTotal(); blockManager.activate(conf); - cacheReplicationManager.activate(); } finally { writeUnlock(); } @@ -898,7 +899,7 @@ void stopCommonServices() { writeLock(); try { if (blockManager != null) blockManager.close(); - if (cacheReplicationManager != null) cacheReplicationManager.close(); + cacheManager.deactivate(); } finally { writeUnlock(); } @@ -930,8 +931,6 @@ void startActiveServices() throws IOException { blockManager.clearQueues(); blockManager.processAllPendingDNMessages(); - cacheReplicationManager.clearQueues(); - if (!isInSafeMode() || (isInSafeMode() && safeMode.isPopulatingReplQueues())) { LOG.info("Reprocessing replication and invalidation queues"); @@ -964,6 +963,8 @@ void startActiveServices() throws IOException { //ResourceMonitor required only at ActiveNN. See HDFS-2914 this.nnrmthread = new Daemon(new NameNodeResourceMonitor()); nnrmthread.start(); + cacheManager.activate(); + blockManager.getDatanodeManager().setSendCachingCommands(true); } finally { writeUnlock(); } @@ -998,6 +999,8 @@ void stopActiveServices() { // so that the tailer starts from the right spot. dir.fsImage.updateLastAppliedTxIdFromWritten(); } + cacheManager.deactivate(); + blockManager.getDatanodeManager().setSendCachingCommands(false); } finally { writeUnlock(); } @@ -1442,10 +1445,6 @@ LocatedBlocks getBlockLocations(String clientMachine, String src, blockManager.getDatanodeManager().sortLocatedBlocks( clientMachine, lastBlockList); } - // Set caching information for the block list - for (LocatedBlock lb: blocks.getLocatedBlocks()) { - cacheReplicationManager.setCachedLocations(lb); - } } return blocks; } @@ -1553,8 +1552,14 @@ && doAccessTime && isAccessTimeSupported()) { length = Math.min(length, fileSize - offset); isUc = false; } - return blockManager.createLocatedBlocks(inode.getBlocks(), fileSize, + LocatedBlocks blocks = + blockManager.createLocatedBlocks(inode.getBlocks(), fileSize, isUc, offset, length, needBlockToken, iip.isSnapshot()); + // Set caching information for the located blocks. 
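The loop that follows feeds each LocatedBlock through CacheManager#setCachedLocations(), which looks the block up in the cachedBlocks GSet and adds every datanode that has reported it as cached. The standalone sketch below only illustrates what that decoration amounts to; the types are simplified stand-ins, and the map replaces the GSet plus each CachedBlock's datanode list.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    /** Illustrative sketch of decorating located blocks with cached replica locations. */
    class CachedLocationDecorator {
      /** blockId -> datanodes that have reported the block as cached. */
      private final Map<Long, List<String>> cachedOn = new HashMap<>();

      void recordCached(long blockId, String datanode) {
        cachedOn.computeIfAbsent(blockId, id -> new ArrayList<>()).add(datanode);
      }

      /** Adds the cached replicas for blockId to the (mutable) output list. */
      void setCachedLocations(long blockId, List<String> cachedLocationsOut) {
        List<String> datanodes = cachedOn.get(blockId);
        if (datanodes == null) {
          return; // block not cached anywhere; leave the located block untouched
        }
        cachedLocationsOut.addAll(datanodes);
      }
    }

With the cached locations advertised alongside the on-disk replicas, a client can prefer a replica that is already pinned in memory over one that must be read from disk.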
+ for (LocatedBlock lb: blocks.getLocatedBlocks()) { + cacheManager.setCachedLocations(lb); + } + return blocks; } finally { if (isReadOp) { readUnlock(); @@ -1928,42 +1933,6 @@ private boolean setReplicationInt(String src, final short replication) return isFile; } - boolean setCacheReplicationInt(String src, final short replication) - throws IOException { - final boolean isFile; - FSPermissionChecker pc = getPermissionChecker(); - checkOperation(OperationCategory.WRITE); - byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); - writeLock(); - try { - checkOperation(OperationCategory.WRITE); - if (isInSafeMode()) { - throw new SafeModeException("Cannot set replication for " + src, safeMode); - } - src = FSDirectory.resolvePath(src, pathComponents, dir); - if (isPermissionEnabled) { - checkPathAccess(pc, src, FsAction.WRITE); - } - - final short[] blockRepls = new short[2]; // 0: old, 1: new - final Block[] blocks = dir.setCacheReplication(src, replication, - blockRepls); - isFile = (blocks != null); - if (isFile) { - cacheReplicationManager.setCacheReplication(blockRepls[0], - blockRepls[1], src, blocks); - } - } finally { - writeUnlock(); - } - - getEditLog().logSync(); - if (isFile) { - logAuditEvent(true, "setCacheReplication", src); - } - return isFile; - } - long getPreferredBlockSize(String filename) throws IOException, UnresolvedLinkException { FSPermissionChecker pc = getPermissionChecker(); @@ -6506,10 +6475,6 @@ public FSDirectory getFSDirectory() { public CacheManager getCacheManager() { return cacheManager; } - /** @return the cache replication manager. */ - public CacheReplicationManager getCacheReplicationManager() { - return cacheReplicationManager; - } @Override // NameNodeMXBean public String getCorruptFiles() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index 15a7d2c8e8e..455d808a37f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -104,8 +104,6 @@ static long combinePreferredBlockSize(long header, long blockSize) { private BlockInfo[] blocks; - private short cacheReplication = 0; - INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime, long atime, BlockInfo[] blklist, short replication, long preferredBlockSize) { super(id, name, permissions, mtime, atime); @@ -201,18 +199,6 @@ public final INodeFile setFileReplication(short replication, Snapshot latest, return nodeToUpdate; } - @Override - public void setCacheReplication(short cacheReplication) { - Preconditions.checkArgument(cacheReplication <= getBlockReplication(), - "Cannot set cache replication higher than block replication factor"); - this.cacheReplication = cacheReplication; - } - - @Override - public short getCacheReplication() { - return cacheReplication; - } - /** @return preferred block size (in bytes) of the file. 
*/ @Override public long getPreferredBlockSize() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 4e1b9cd77b3..9eaeac722f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -689,8 +689,13 @@ protected NameNode(Configuration conf, NamenodeRole role) try { initializeGenericKeys(conf, nsId, namenodeId); initialize(conf); - state.prepareToEnterState(haContext); - state.enterState(haContext); + try { + haContext.writeLock(); + state.prepareToEnterState(haContext); + state.enterState(haContext); + } finally { + haContext.writeUnlock(); + } } catch (IOException e) { this.stop(); throw e; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index bbb67a3d978..e70a52d1d24 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -997,9 +997,7 @@ public DatanodeCommand cacheReport(DatanodeRegistration nodeReg, + "from " + nodeReg + " " + blist.getNumberOfBlocks() + " blocks"); } - - namesystem.getCacheReplicationManager() - .processCacheReport(nodeReg, poolId, blist); + namesystem.getCacheManager().processCacheReport(nodeReg, blist); return null; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 1171e68b543..3d64bf81032 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -1470,6 +1470,18 @@ + + dfs.namenode.path.based.cache.refresh.interval.ms + 300000 + + The amount of milliseconds between subsequent path cache rescans. Path + cache rescans are when we calculate which blocks should be cached, and on + what datanodes. + + By default, this parameter is set to 300000, which is five minutes. + + + dfs.datanode.fsdatasetcache.max.threads.per.volume 4 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java deleted file mode 100644 index 369cc376b58..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCacheReplicationManager.java +++ /dev/null @@ -1,256 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs.server.blockmanagement; - -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assume.assumeTrue; - -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystemTestHelper; -import org.apache.hadoop.fs.HdfsBlockLocation; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.RemoteIterator; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.protocol.CachePoolInfo; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheDescriptor; -import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; -import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; -import org.apache.hadoop.io.nativeio.NativeIO; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -public class TestCacheReplicationManager { - - private static final long BLOCK_SIZE = 512; - private static final int REPL_FACTOR = 3; - private static final int NUM_DATANODES = 4; - // Most Linux installs allow a default of 64KB locked memory - private static final long CACHE_CAPACITY = 64 * 1024 / NUM_DATANODES; - - private static Configuration conf; - private static MiniDFSCluster cluster = null; - private static DistributedFileSystem dfs; - private static NameNode nn; - private static NamenodeProtocols nnRpc; - private static CacheReplicationManager cacheReplManager; - final private static FileSystemTestHelper helper = new FileSystemTestHelper(); - private static Path rootDir; - - @Before - public void setUp() throws Exception { - - assumeTrue(NativeIO.isAvailable()); - - conf = new HdfsConfiguration(); - conf.setLong(DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); - conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, - CACHE_CAPACITY); - conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1); - conf.setBoolean(DFS_NAMENODE_CACHING_ENABLED_KEY, true); - conf.setLong(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 1000); - - cluster = new MiniDFSCluster.Builder(conf) - .numDataNodes(NUM_DATANODES).build(); - cluster.waitActive(); - - dfs = cluster.getFileSystem(); - nn = cluster.getNameNode(); - nnRpc = nn.getRpcServer(); - cacheReplManager = nn.getNamesystem().getCacheReplicationManager(); - rootDir = helper.getDefaultWorkingDirectory(dfs); - } - - @After - public void tearDown() throws Exception { - if (dfs != 
null) { - dfs.close(); - } - if (cluster != null) { - cluster.shutdown(); - } - } - - private int countNumCachedBlocks() { - return cacheReplManager.cachedBlocksMap.size(); - } - - private void waitForExpectedNumCachedBlocks(final int expected) - throws Exception { - int actual = countNumCachedBlocks(); - while (expected != actual) { - Thread.sleep(500); - actual = countNumCachedBlocks(); - } - waitForExpectedNumCachedReplicas(expected*REPL_FACTOR); - } - - private void waitForExpectedNumCachedReplicas(final int expected) - throws Exception { - BlocksMap cachedBlocksMap = cacheReplManager.cachedBlocksMap; - int actual = 0; - while (expected != actual) { - Thread.sleep(500); - nn.getNamesystem().readLock(); - try { - actual = 0; - for (BlockInfo b : cachedBlocksMap.getBlocks()) { - actual += cachedBlocksMap.numNodes(b); - } - } finally { - nn.getNamesystem().readUnlock(); - } - } - } - - @Test(timeout=60000) - public void testCachePaths() throws Exception { - // Create the pool - final String pool = "friendlyPool"; - nnRpc.addCachePool(new CachePoolInfo("friendlyPool")); - // Create some test files - final int numFiles = 2; - final int numBlocksPerFile = 2; - final List paths = new ArrayList(numFiles); - for (int i=0; i entries = - nnRpc.listPathBasedCacheDescriptors(0, null, null); - for (int i=0; i pit = dfs.listCachePools(); - assertTrue("No cache pools found", pit.hasNext()); - CachePoolInfo info = pit.next(); - assertEquals(pool, info.getPoolName()); - assertEquals(groupName, info.getGroupName()); - assertEquals(mode, info.getMode()); - assertEquals(weight, (int)info.getWeight()); - assertFalse("Unexpected # of cache pools found", pit.hasNext()); - - // Create some cache entries - int numEntries = 10; - String entryPrefix = "/party-"; - for (int i=0; i dit - = dfs.listPathBasedCacheDescriptors(null, null); - for (int i=0; i iter = dn.getCached().iterator(); + Assert.assertEquals(blocks[0], iter.next()); + Assert.assertEquals(blocks[1], iter.next()); + Assert.assertTrue(!iter.hasNext()); + // add a block to the front + Assert.assertTrue(dn.getCached().addFirst(blocks[2])); + iter = dn.getCached().iterator(); + Assert.assertEquals(blocks[2], iter.next()); + Assert.assertEquals(blocks[0], iter.next()); + Assert.assertEquals(blocks[1], iter.next()); + Assert.assertTrue(!iter.hasNext()); + // remove a block from the middle + Assert.assertTrue(dn.getCached().remove(blocks[0])); + iter = dn.getCached().iterator(); + Assert.assertEquals(blocks[2], iter.next()); + Assert.assertEquals(blocks[1], iter.next()); + Assert.assertTrue(!iter.hasNext()); + // remove all blocks + dn.getCached().clear(); + Assert.assertTrue("expected cached list to be empty after clear.", + !dn.getPendingCached().iterator().hasNext()); + } + + private void testAddElementsToList(CachedBlocksList list, + CachedBlock[] blocks) { + Assert.assertTrue("expected list to start off empty.", + !list.iterator().hasNext()); + for (CachedBlock block : blocks) { + Assert.assertTrue(list.add(block)); + } + } + + private void testRemoveElementsFromList(Random r, + CachedBlocksList list, CachedBlock[] blocks) { + int i = 0; + for (Iterator iter = list.iterator(); iter.hasNext(); ) { + Assert.assertEquals(blocks[i], iter.next()); + i++; + } + if (r.nextBoolean()) { + LOG.info("Removing via iterator"); + for (Iterator iter = list.iterator(); iter.hasNext() ;) { + iter.next(); + iter.remove(); + } + } else { + LOG.info("Removing in pseudo-random order"); + CachedBlock[] remainingBlocks = Arrays.copyOf(blocks, blocks.length); + for (int 
removed = 0; removed < remainingBlocks.length; ) { + int toRemove = r.nextInt(remainingBlocks.length); + if (remainingBlocks[toRemove] != null) { + Assert.assertTrue(list.remove(remainingBlocks[toRemove])); + remainingBlocks[toRemove] = null; + removed++; + } + } + } + Assert.assertTrue("expected list to be empty after everything " + + "was removed.", !list.iterator().hasNext()); + } + + @Test(timeout=60000) + public void testMultipleLists() { + DatanodeDescriptor[] datanodes = new DatanodeDescriptor[] { + new DatanodeDescriptor( + new DatanodeID("127.0.0.1", "localhost", "abcd", 5000, 5001, 5002)), + new DatanodeDescriptor( + new DatanodeID("127.0.1.1", "localhost", "efgh", 6000, 6001, 6002)), + }; + CachedBlocksList[] lists = new CachedBlocksList[] { + datanodes[0].getPendingCached(), + datanodes[0].getCached(), + datanodes[1].getPendingCached(), + datanodes[1].getCached(), + datanodes[1].getPendingUncached(), + }; + final int NUM_BLOCKS = 8000; + CachedBlock[] blocks = new CachedBlock[NUM_BLOCKS]; + for (int i = 0; i < NUM_BLOCKS; i++) { + blocks[i] = new CachedBlock(i, (short)i, true); + } + Random r = new Random(654); + for (CachedBlocksList list : lists) { + testAddElementsToList(list, blocks); + } + for (CachedBlocksList list : lists) { + testRemoveElementsFromList(r, list, blocks); + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java index 7685c11ef78..ce3713d39d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java @@ -17,6 +17,12 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS; import static junit.framework.Assert.assertTrue; import static junit.framework.Assert.fail; import static org.junit.Assert.assertEquals; @@ -24,6 +30,10 @@ import java.io.IOException; import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; import junit.framework.Assert; @@ -31,6 +41,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileSystemTestHelper; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; @@ -46,13 +57,19 @@ import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective; import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.InvalidIdException; import org.apache.hadoop.hdfs.protocol.RemovePathBasedCacheDescriptorException.NoSuchIdException; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type; import 
org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; +import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.GSet; import org.junit.After; +import org.junit.Assume; import org.junit.Before; import org.junit.Test; +import com.google.common.base.Supplier; + public class TestPathBasedCacheRequests { static final Log LOG = LogFactory.getLog(TestPathBasedCacheRequests.class); @@ -83,7 +100,7 @@ public void teardown() throws Exception { } } - @Test + @Test(timeout=60000) public void testBasicPoolOperations() throws Exception { final String poolName = "pool1"; CachePoolInfo info = new CachePoolInfo(poolName). @@ -218,7 +235,7 @@ public void testBasicPoolOperations() throws Exception { dfs.addCachePool(info); } - @Test + @Test(timeout=60000) public void testCreateAndModifyPools() throws Exception { String poolName = "pool1"; String ownerName = "abc"; @@ -301,7 +318,7 @@ public PathBasedCacheDescriptor run() throws IOException { }); } - @Test + @Test(timeout=60000) public void testAddRemoveDirectives() throws Exception { proto.addCachePool(new CachePoolInfo("pool1"). setMode(new FsPermission((short)0777))); @@ -366,6 +383,7 @@ public void testAddRemoveDirectives() throws Exception { try { addAsUnprivileged(new PathBasedCacheDirective.Builder(). setPath(new Path("/emptypoolname")). + setReplication((short)1). setPool(""). build()); Assert.fail("expected an error when adding a PathBasedCache " + @@ -424,4 +442,302 @@ public void testAddRemoveDirectives() throws Exception { iter = dfs.listPathBasedCacheDescriptors(null, null); assertFalse(iter.hasNext()); } + + @Test(timeout=60000) + public void testCacheManagerRestart() throws Exception { + HdfsConfiguration conf = createCachingConf(); + MiniDFSCluster cluster = + new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); + + try { + cluster.waitActive(); + DistributedFileSystem dfs = cluster.getFileSystem(); + + // Create and validate a pool + final String pool = "poolparty"; + String groupName = "partygroup"; + FsPermission mode = new FsPermission((short)0777); + int weight = 747; + dfs.addCachePool(new CachePoolInfo(pool) + .setGroupName(groupName) + .setMode(mode) + .setWeight(weight)); + RemoteIterator pit = dfs.listCachePools(); + assertTrue("No cache pools found", pit.hasNext()); + CachePoolInfo info = pit.next(); + assertEquals(pool, info.getPoolName()); + assertEquals(groupName, info.getGroupName()); + assertEquals(mode, info.getMode()); + assertEquals(weight, (int)info.getWeight()); + assertFalse("Unexpected # of cache pools found", pit.hasNext()); + + // Create some cache entries + int numEntries = 10; + String entryPrefix = "/party-"; + for (int i=0; i dit + = dfs.listPathBasedCacheDescriptors(null, null); + for (int i=0; i() { + @Override + public Boolean get() { + int numCachedBlocks = 0, numCachedReplicas = 0; + namesystem.readLock(); + try { + GSet cachedBlocks = + cacheManager.getCachedBlocks(); + if (cachedBlocks != null) { + for (Iterator iter = cachedBlocks.iterator(); + iter.hasNext(); ) { + CachedBlock cachedBlock = iter.next(); + numCachedBlocks++; + numCachedReplicas += cachedBlock.getDatanodes(Type.CACHED).size(); + } + } + } finally { + namesystem.readUnlock(); + } + if ((numCachedBlocks == expectedCachedBlocks) && + (numCachedReplicas == expectedCachedReplicas)) { + return true; + } else { + LOG.info("cached blocks: have " + numCachedBlocks + + " / " + expectedCachedBlocks); + LOG.info("cached 
replicas: have " + numCachedReplicas + + " / " + expectedCachedReplicas); + return false; + } + } + }, 500, 60000); + } + + private static final long BLOCK_SIZE = 512; + private static final int NUM_DATANODES = 4; + + // Most Linux installs will allow non-root users to lock 64KB. + private static final long CACHE_CAPACITY = 64 * 1024 / NUM_DATANODES; + + /** + * Return true if we can test DN caching. + */ + private static boolean canTestDatanodeCaching() { + if (!NativeIO.isAvailable()) { + // Need NativeIO in order to cache blocks on the DN. + return false; + } + if (NativeIO.getMemlockLimit() < CACHE_CAPACITY) { + return false; + } + return true; + } + + private static HdfsConfiguration createCachingConf() { + HdfsConfiguration conf = new HdfsConfiguration(); + conf.setLong(DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); + conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, CACHE_CAPACITY); + conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1); + conf.setBoolean(DFS_NAMENODE_CACHING_ENABLED_KEY, true); + conf.setLong(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 1000); + conf.setLong(DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 1000); + return conf; + } + + @Test(timeout=120000) + public void testWaitForCachedReplicas() throws Exception { + Assume.assumeTrue(canTestDatanodeCaching()); + HdfsConfiguration conf = createCachingConf(); + FileSystemTestHelper helper = new FileSystemTestHelper(); + MiniDFSCluster cluster = + new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build(); + + try { + cluster.waitActive(); + DistributedFileSystem dfs = cluster.getFileSystem(); + NameNode namenode = cluster.getNameNode(); + NamenodeProtocols nnRpc = namenode.getRpcServer(); + Path rootDir = helper.getDefaultWorkingDirectory(dfs); + // Create the pool + final String pool = "friendlyPool"; + nnRpc.addCachePool(new CachePoolInfo("friendlyPool")); + // Create some test files + final int numFiles = 2; + final int numBlocksPerFile = 2; + final List paths = new ArrayList(numFiles); + for (int i=0; i entries = + nnRpc.listPathBasedCacheDescriptors(0, null, null); + for (int i=0; i paths = new ArrayList(numFiles); + for (int i=0; i paths = new LinkedList(); + paths.add(new Path("/foo/bar")); + paths.add(new Path("/foo/baz")); + paths.add(new Path("/foo2/bar2")); + paths.add(new Path("/foo2/baz2")); + dfs.mkdir(new Path("/foo"), FsPermission.getDirDefault()); + dfs.mkdir(new Path("/foo2"), FsPermission.getDirDefault()); + final int numBlocksPerFile = 2; + for (Path path : paths) { + FileSystemTestHelper.createFile(dfs, path, numBlocksPerFile, + (int)BLOCK_SIZE, (short)3, false); + } + waitForCachedBlocks(namenode, 0, 0); + // cache entire directory + PathBasedCacheDescriptor descriptor = dfs.addPathBasedCacheDirective( + new PathBasedCacheDirective.Builder(). + setPath(new Path("/foo")). + setReplication((short)2). + setPool(pool). 
+ build()); + assertEquals("Descriptor does not match requested pool", pool, + descriptor.getPool()); + waitForCachedBlocks(namenode, 4, 8); + // remove and watch numCached go to 0 + dfs.removePathBasedCacheDescriptor(descriptor); + waitForCachedBlocks(namenode, 0, 0); + } finally { + cluster.shutdown(); + } + } + } From 40c97caf1936edf8fbdee3874e70ef0ef2ddda7d Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Wed, 16 Oct 2013 22:15:47 +0000 Subject: [PATCH 42/51] add missing file for HDFS-5096 git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1532925 13f79535-47bb-0310-9956-ffa450edef68 --- .../hdfs/server/namenode/CachedBlock.java | 249 ++++++++++++++++++ 1 file changed, 249 insertions(+) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachedBlock.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachedBlock.java new file mode 100644 index 00000000000..35c86d4a63e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachedBlock.java @@ -0,0 +1,249 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.util.Arrays; +import java.util.LinkedList; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type; +import org.apache.hadoop.util.IntrusiveCollection; +import org.apache.hadoop.util.LightWeightGSet; +import org.apache.hadoop.util.IntrusiveCollection.Element; +import org.apache.hadoop.util.LightWeightGSet.LinkedElement; + +/** + * Represents a cached block. + */ +@InterfaceAudience.LimitedPrivate({"HDFS"}) +public final class CachedBlock implements Element, + LightWeightGSet.LinkedElement { + private static final Object[] EMPTY_ARRAY = new Object[0]; + + /** + * Block id. + */ + private final long blockId; + + /** + * Used to implement #{LightWeightGSet.LinkedElement} + */ + private LinkedElement nextElement; + + /** + * Bit 15: Mark + * Bit 0-14: cache replication factor. + */ + private short replicationAndMark; + + /** + * Used to implement the CachedBlocksList. + * + * Since this CachedBlock can be in multiple CachedBlocksList objects, + * we need to be able to store multiple 'prev' and 'next' pointers. + * The triplets array does this. 
+ * + * Each triplet contains a CachedBlockList object followed by a + * prev pointer, followed by a next pointer. + */ + private Object[] triplets; + + public CachedBlock(long blockId, short replication, boolean mark) { + this.blockId = blockId; + this.triplets = EMPTY_ARRAY; + setReplicationAndMark(replication, mark); + } + + public long getBlockId() { + return blockId; + } + + @Override + public int hashCode() { + return (int)(blockId^(blockId>>>32)); + } + + @Override + public boolean equals(Object o) { + if (o.getClass() != this.getClass()) { + return false; + } + CachedBlock other = (CachedBlock)o; + return other.blockId == blockId; + } + + public void setReplicationAndMark(short replication, boolean mark) { + assert replication >= 0; + replicationAndMark = (short)((replication << 1) | (mark ? 0x1 : 0x0)); + } + + public boolean getMark() { + return ((replicationAndMark & 0x1) != 0); + } + + public short getReplication() { + return (short)(replicationAndMark >>> 1); + } + + /** + * Return true if this CachedBlock is present on the given list. + */ + public boolean isPresent(CachedBlocksList cachedBlocksList) { + for (int i = 0; i < triplets.length; i += 3) { + CachedBlocksList list = (CachedBlocksList)triplets[i]; + if (list == cachedBlocksList) { + return true; + } + } + return false; + } + + /** + * Get a list of the datanodes which this block is cached, + * planned to be cached, or planned to be uncached on. + * + * @param type If null, this parameter is ignored. + * If it is non-null, we match only datanodes which + * have it on this list. + * See {@link DatanodeDescriptor#CachedBlocksList#Type} + * for a description of all the lists. + * + * @return The list of datanodes. Modifying this list does not + * alter the state of the CachedBlock. 
+ */ + public List getDatanodes(Type type) { + List nodes = new LinkedList(); + for (int i = 0; i < triplets.length; i += 3) { + CachedBlocksList list = (CachedBlocksList)triplets[i]; + if ((type == null) || (list.getType() == type)) { + nodes.add(list.getDatanode()); + } + } + return nodes; + } + + @Override + public void insertInternal(IntrusiveCollection list, Element prev, + Element next) { + for (int i = 0; i < triplets.length; i += 3) { + if (triplets[i] == list) { + throw new RuntimeException("Trying to re-insert an element that " + + "is already in the list."); + } + } + Object newTriplets[] = Arrays.copyOf(triplets, triplets.length + 3); + newTriplets[triplets.length] = list; + newTriplets[triplets.length + 1] = prev; + newTriplets[triplets.length + 2] = next; + triplets = newTriplets; + } + + @Override + public void setPrev(IntrusiveCollection list, Element prev) { + for (int i = 0; i < triplets.length; i += 3) { + if (triplets[i] == list) { + triplets[i + 1] = prev; + return; + } + } + throw new RuntimeException("Called setPrev on an element that wasn't " + + "in the list."); + } + + @Override + public void setNext(IntrusiveCollection list, Element next) { + for (int i = 0; i < triplets.length; i += 3) { + if (triplets[i] == list) { + triplets[i + 2] = next; + return; + } + } + throw new RuntimeException("Called setNext on an element that wasn't " + + "in the list."); + } + + @Override + public void removeInternal(IntrusiveCollection list) { + for (int i = 0; i < triplets.length; i += 3) { + if (triplets[i] == list) { + Object[] newTriplets = new Object[triplets.length - 3]; + System.arraycopy(triplets, 0, newTriplets, 0, i); + System.arraycopy(triplets, i + 3, newTriplets, i, + triplets.length - (i + 3)); + triplets = newTriplets; + return; + } + } + throw new RuntimeException("Called remove on an element that wasn't " + + "in the list."); + } + + @Override + public Element getPrev(IntrusiveCollection list) { + for (int i = 0; i < triplets.length; i += 3) { + if (triplets[i] == list) { + return (Element)triplets[i + 1]; + } + } + throw new RuntimeException("Called getPrev on an element that wasn't " + + "in the list."); + } + + @Override + public Element getNext(IntrusiveCollection list) { + for (int i = 0; i < triplets.length; i += 3) { + if (triplets[i] == list) { + return (Element)triplets[i + 2]; + } + } + throw new RuntimeException("Called getNext on an element that wasn't " + + "in the list."); + } + + @Override + public boolean isInList(IntrusiveCollection list) { + for (int i = 0; i < triplets.length; i += 3) { + if (triplets[i] == list) { + return true; + } + } + return false; + } + + @Override + public String toString() { + return new StringBuilder().append("{"). + append("blockId=").append(blockId).append(", "). + append("replication=").append(getReplication()).append(", "). + append("mark=").append(getMark()).append("}"). 
+ toString(); + } + + @Override // LightWeightGSet.LinkedElement + public void setNext(LinkedElement next) { + this.nextElement = next; + } + + @Override // LightWeightGSet.LinkedElement + public LinkedElement getNext() { + return nextElement; + } +} From 34f08944b7c8d58f531a3f3bf3d4ee4cd3fa643a Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Thu, 17 Oct 2013 02:14:33 +0000 Subject: [PATCH 43/51] merge trunk to branch HDFS-4949 git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1532952 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 211 ++++++++- .../hadoop-hdfs/src/contrib/bkjournal/pom.xml | 18 + .../org/apache/hadoop/hdfs/DFSClient.java | 23 +- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 20 +- .../apache/hadoop/hdfs/DFSInputStream.java | 2 +- .../apache/hadoop/hdfs/DFSOutputStream.java | 58 +-- .../java/org/apache/hadoop/hdfs/DFSUtil.java | 43 ++ .../hadoop/hdfs/DistributedFileSystem.java | 5 +- .../java/org/apache/hadoop/hdfs/HAUtil.java | 35 +- .../apache/hadoop/hdfs/HftpFileSystem.java | 60 +-- .../apache/hadoop/hdfs/HsftpFileSystem.java | 23 +- .../apache/hadoop/hdfs/NameNodeProxies.java | 10 +- .../hadoop/hdfs/protocol/DatanodeID.java | 22 +- .../hadoop/hdfs/protocol/DatanodeInfo.java | 23 +- .../hadoop/hdfs/protocolPB/PBHelper.java | 4 +- .../DelegationTokenSecretManager.java | 19 + .../hadoop/hdfs/server/balancer/Balancer.java | 38 +- .../server/blockmanagement/BlockManager.java | 38 +- .../blockmanagement/BlockPlacementPolicy.java | 74 +-- .../BlockPlacementPolicyDefault.java | 361 ++++++--------- .../BlockPlacementPolicyWithNodeGroup.java | 78 ++-- .../server/blockmanagement/BlocksMap.java | 6 +- .../blockmanagement/DatanodeDescriptor.java | 3 +- .../blockmanagement/DatanodeManager.java | 92 ++-- .../hadoop/hdfs/server/common/JspHelper.java | 68 +-- .../datanode/BlockPoolSliceScanner.java | 16 +- .../hadoop/hdfs/server/datanode/DataNode.java | 169 ++----- .../server/datanode/DatanodeJspHelper.java | 220 ++++----- .../datanode/fsdataset/RollingLogs.java | 6 + .../fsdataset/impl/RollingLogsImpl.java | 7 + .../server/namenode/ClusterJspHelper.java | 13 +- .../hdfs/server/namenode/FSDirectory.java | 10 +- .../hdfs/server/namenode/FSNamesystem.java | 437 ++++++++++-------- .../server/namenode/FileChecksumServlets.java | 11 +- .../hdfs/server/namenode/FileDataServlet.java | 11 +- .../server/namenode/NameNodeHttpServer.java | 20 +- .../server/namenode/NameNodeRpcServer.java | 15 +- .../server/namenode/NamenodeJspHelper.java | 142 +++--- .../hdfs/server/namenode/Namesystem.java | 3 + .../server/namenode/SafeModeException.java | 5 +- .../namenode/StartupProgressServlet.java | 8 +- .../namenode/metrics/FSNamesystemMBean.java | 15 + .../protocol/DisallowedDatanodeException.java | 7 +- .../hadoop/hdfs/util/LightWeightHashSet.java | 2 +- .../org/apache/hadoop/hdfs/web/JsonUtil.java | 29 +- .../hadoop/hdfs/web/WebHdfsFileSystem.java | 173 +++++-- .../src/main/native/libhdfs/hdfs.c | 2 +- .../hadoop-hdfs/src/main/proto/hdfs.proto | 3 +- .../src/main/resources/hdfs-default.xml | 16 + .../src/main/webapps/hdfs/corrupt_files.jsp | 10 +- .../src/main/webapps/hdfs/dfshealth.jsp | 18 +- .../src/main/webapps/hdfs/dfsnodelist.jsp | 4 +- .../src/site/apt/Federation.apt.vm | 4 +- .../images/federation-background.gif | Bin 0 -> 13420 bytes .../src/site/resources/images/federation.gif | Bin 0 -> 18355 bytes .../org/apache/hadoop/fs/TestGlobPaths.java | 143 ++++-- .../fs/TestHDFSFileContextMainOperations.java | 43 +- ...tCommand.java 
=> TestHdfsTextCommand.java} | 2 +- .../org/apache/hadoop/hdfs/DFSTestUtil.java | 73 +-- .../hadoop/hdfs/TestDFSClientRetries.java | 6 +- .../hadoop/hdfs/TestDFSOutputStream.java | 74 +++ .../org/apache/hadoop/hdfs/TestDFSShell.java | 53 ++- .../org/apache/hadoop/hdfs/TestDFSUtil.java | 67 ++- .../hadoop/hdfs/TestDatanodeBlockScanner.java | 39 ++ .../hadoop/hdfs/TestDatanodeRegistration.java | 23 +- .../apache/hadoop/hdfs/TestDecommission.java | 11 +- .../hdfs/TestDistributedFileSystem.java | 42 +- .../hadoop/hdfs/TestFileInputStreamCache.java | 17 +- .../hadoop/hdfs/TestHftpDelegationToken.java | 10 +- .../hadoop/hdfs/TestHftpFileSystem.java | 24 +- .../hadoop/hdfs/TestNameNodeHttpServer.java | 39 ++ .../org/apache/hadoop/hdfs/TestPeerCache.java | 29 +- .../hdfs/TestShortCircuitLocalRead.java | 60 +++ .../hadoop/hdfs/TestSnapshotCommands.java | 213 +++++++++ .../balancer/TestBalancerWithNodeGroup.java | 12 +- .../blockmanagement/TestCachedBlocksList.java | 6 +- .../TestRBWBlockInvalidation.java | 4 +- .../TestReplicationPolicy.java | 198 ++++---- .../TestReplicationPolicyWithNodeGroup.java | 161 +++---- .../hdfs/server/common/TestJspHelper.java | 171 ++++++- .../server/datanode/DataNodeTestUtils.java | 17 +- .../hdfs/server/datanode/TestDatanodeJsp.java | 17 +- .../namenode/NNThroughputBenchmark.java | 46 +- .../server/namenode/TestAddBlockRetry.java | 4 +- .../server/namenode/TestClusterJspHelper.java | 59 +++ .../TestCommitBlockSynchronization.java | 18 +- .../server/namenode/TestFSNamesystem.java | 20 +- .../namenode/TestFSNamesystemMBean.java | 72 +++ .../namenode/TestFileJournalManager.java | 29 +- .../hdfs/server/namenode/TestINodeFile.java | 56 ++- .../namenode/TestNameNodeJspHelper.java | 290 +++++++++++- .../namenode/TestNamenodeRetryCache.java | 44 +- .../namenode/TestStartupProgressServlet.java | 16 + .../server/namenode/ha/TestDNFencing.java | 4 +- .../ha/TestDelegationTokensWithHA.java | 116 ++++- .../server/namenode/ha/TestHASafeMode.java | 52 ++- .../ha/TestLossyRetryInvocationHandler.java | 57 +++ .../namenode/ha/TestRetryCacheWithHA.java | 13 +- .../namenode/metrics/TestNameNodeMetrics.java | 7 - .../snapshot/TestOpenFilesWithSnapshot.java | 113 +++++ .../snapshot/TestSnapshotDeletion.java | 67 ++- .../namenode/snapshot/TestSnapshotRename.java | 28 ++ .../hadoop/hdfs/web/TestWebHDFSForHA.java | 77 +++ .../hadoop/hdfs/web/WebHdfsTestUtil.java | 2 +- .../src/test/resources/testHDFSConf.xml | 29 +- 105 files changed, 3843 insertions(+), 1640 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/federation-background.gif create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/site/resources/images/federation.gif rename hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/{TestTextCommand.java => TestHdfsTextCommand.java} (99%) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestNameNodeHttpServer.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterJspHelper.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java create mode 100644 
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestLossyRetryInvocationHandler.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index f07d3beaba2..5ccd87bc348 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -120,8 +120,8 @@ Trunk (Unreleased) HDFS-4904. Remove JournalService. (Arpit Agarwal via cnauroth) - HDFS-4953. Enable HDFS local reads via mmap. - (Colin Patrick McCabe via wang). + HDFS-5041. Add the time of last heartbeat to dead server Web UI (Shinichi + Yamashita via brandonli) OPTIMIZATIONS @@ -247,8 +247,19 @@ Release 2.3.0 - UNRELEASED NEW FEATURES + HDFS-5122. Support failover and retry in WebHdfsFileSystem for NN HA. + (Haohui Mai via jing9) + + HDFS-4953. Enable HDFS local reads via mmap. + (Colin Patrick McCabe via wang). + + HDFS-5342. Provide more information in the FSNamesystem JMX interfaces. + (Haohui Mai via jing9) + IMPROVEMENTS + HDFS-5267. Remove volatile from LightWeightHashSet. (Junping Du via llu) + HDFS-4657. Limit the number of blocks logged by the NN after a block report to a configurable value. (Aaron T. Myers via Colin Patrick McCabe) @@ -256,9 +267,6 @@ Release 2.3.0 - UNRELEASED HDFS-4278. Log an ERROR when DFS_BLOCK_ACCESS_TOKEN_ENABLE config is disabled but security is turned on. (Kousuke Saruta via harsh) - HDFS-4817. Make HDFS advisory caching configurable on a per-file basis. - (Colin Patrick McCabe) - HDFS-5004. Add additional JMX bean for NameNode status data (Trevor Lorimer via cos) @@ -275,8 +283,46 @@ Release 2.3.0 - UNRELEASED HDFS-4879. Add "blocked ArrayList" collection to avoid CMS full GCs (Todd Lipcon via Colin Patrick McCabe) + HDFS-4096. Add snapshot information to namenode WebUI. (Haohui Mai via + jing9) + + HDFS-5188. In BlockPlacementPolicy, reduce the number of chooseTarget(..) + methods; replace HashMap with Map in parameter declarations and cleanup + some related code. (szetszwo) + + HDFS-5207. In BlockPlacementPolicy.chooseTarget(..), change the writer + and the excludedNodes parameter types respectively to Node and Set. + (Junping Du via szetszwo) + + HDFS-5240. Separate formatting from logging in the audit logger API (daryn) + + HDFS-5191. Revisit zero-copy API in FSDataInputStream to make it more + intuitive. (Contributed by Colin Patrick McCabe) + + HDFS-5260. Merge zero-copy memory-mapped HDFS client reads to trunk and + branch-2. (cnauroth) + + HDFS-4517. Cover class RemoteBlockReader with unit tests. (Vadim Bondarev + and Dennis Y via kihwal) + + HDFS-4512. Cover package org.apache.hadoop.hdfs.server.common with tests. + (Vadim Bondarev via kihwal) + + HDFS-4510. Cover classes ClusterJspHelper/NamenodeJspHelper with unit + tests. (Andrey Klochkov via kihwal) + + HDFS-5323. Remove some deadcode in BlockManager (Colin Patrick McCabe) + + HDFS-5338. Add a conf to disable hostname check in datanode registration. + (szetszwo) + + HDFS-5130. Add test for snapshot related FsShell and DFSAdmin commands. + (Binglin Chang via jing9) + OPTIMIZATIONS + HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn) + BUG FIXES HDFS-5034. 
Remove debug prints from GetFileLinkInfo (Andrew Wang via Colin Patrick McCabe) @@ -295,7 +341,130 @@ Release 2.3.0 - UNRELEASED HDFS-5170. BlockPlacementPolicyDefault uses the wrong classname when alerting to enable debug logging. (Andrew Wang) -Release 2.1.1-beta - UNRELEASED + HDFS-5031. BlockScanner scans the block multiple times. (Vinay via Arpit + Agarwal) + + HDFS-5266. ElasticByteBufferPool#Key does not implement equals. (cnauroth) + + HDFS-5352. Server#initLog() doesn't close InputStream in httpfs. (Ted Yu via + jing9) + + HDFS-5283. Under construction blocks only inside snapshots should not be + counted in safemode threshhold. (Vinay via szetszwo) + + HDFS-4376. Fix race conditions in Balancer. (Junping Du via szetszwo) + +Release 2.2.1 - UNRELEASED + + INCOMPATIBLE CHANGES + + NEW FEATURES + + IMPROVEMENTS + + HDFS-5360. Improvement of usage message of renameSnapshot and + deleteSnapshot. (Shinichi Yamashita via wang) + + OPTIMIZATIONS + + BUG FIXES + + HDFS-5307. Support both HTTP and HTTPS in jsp pages (Haohui Mai via + brandonli) + + HDFS-5291. Standby namenode after transition to active goes into safemode. + (jing9) + + HDFS-5317. Go back to DFS Home link does not work on datanode webUI + (Haohui Mai via brandonli) + + HDFS-5316. Namenode ignores the default https port (Haohui Mai via + brandonli) + + HDFS-5281. COMMIT request should not block. (brandonli) + + HDFS-5337. should do hsync for a commit request even there is no pending + writes (brandonli) + + HDFS-5335. Hive query failed with possible race in dfs output stream. + (Haohui Mai via suresh) + + HDFS-5322. HDFS delegation token not found in cache errors seen on secure HA + clusters. (jing9) + + HDFS-5329. Update FSNamesystem#getListing() to handle inode path in startAfter + token. (brandonli) + + HDFS-5330. fix readdir and readdirplus for large directories (brandonli) + + HDFS-5370. Typo in Error Message: different between range in condition + and range in error message. (Kousuke Saruta via suresh) + +Release 2.2.0 - 2013-10-13 + + INCOMPATIBLE CHANGES + + NEW FEATURES + + HDFS-4817. Make HDFS advisory caching configurable on a per-file basis. + (Colin Patrick McCabe) + + HDFS-5230. Introduce RpcInfo to decouple XDR classes from the RPC API. + (Haohui Mai via brandonli) + + IMPROVEMENTS + + HDFS-5246. Make Hadoop nfs server port and mount daemon port + configurable. (Jinghui Wang via brandonli) + + HDFS-5256. Use guava LoadingCache to implement DFSClientCache. (Haohui Mai + via brandonli) + + HDFS-5308. Replace HttpConfig#getSchemePrefix with implicit schemes in HDFS + JSP. (Haohui Mai via jing9) + + OPTIMIZATIONS + + BUG FIXES + + HDFS-5139. Remove redundant -R option from setrep. + + HDFS-5251. Race between the initialization of NameNode and the http + server. (Haohui Mai via suresh) + + HDFS-5258. Skip tests in TestHDFSCLI that are not applicable on Windows. + (Chuan Liu via cnauroth) + + HDFS-5186. TestFileJournalManager fails on Windows due to file handle leaks. + (Chuan Liu via cnauroth) + + HDFS-5268. NFS write commit verifier is not set in a few places (brandonli) + + HDFS-5265. Namenode fails to start when dfs.https.port is unspecified. + (Haohui Mai via jing9) + + HDFS-5255. Distcp job fails with hsftp when https is enabled in insecure + cluster. (Arpit Agarwal) + + HDFS-5279. Guard against NullPointerException in NameNode JSP pages before + initialization of FSNamesystem. (cnauroth) + + HDFS-5289. Race condition in TestRetryCacheWithHA#testCreateSymlink causes + spurious test failure. (atm) + + HDFS-5300. 
FSNameSystem#deleteSnapshot() should not check owner in case of + permissions disabled. (Vinay via jing9) + + HDFS-5306. Datanode https port is not available at the namenode. (Suresh + Srinivas via brandonli) + + HDFS-5299. DFS client hangs in updatePipeline RPC when failover happened. + (Vinay via jing9) + + HDFS-5259. Support client which combines appended data with old data + before sends it to NFS server. (brandonli) + +Release 2.1.1-beta - 2013-09-23 INCOMPATIBLE CHANGES @@ -336,6 +505,13 @@ Release 2.1.1-beta - UNRELEASED HDFS-5085. Refactor o.a.h.nfs to support different types of authentications. (jing9) + HDFS-5067 Support symlink operations in NFS gateway. (brandonli) + + HDFS-5199 Add more debug trace for NFS READ and WRITE. (brandonli) + + HDFS-5234 Move RpcFrameDecoder out of the public API. + (Haohui Mai via brandonli) + IMPROVEMENTS HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may @@ -372,6 +548,12 @@ Release 2.1.1-beta - UNRELEASED HDFS-4680. Audit logging of delegation tokens for MR tracing. (Andrew Wang) + HDFS-5212. Refactor RpcMessage and NFS3Response to support different + types of authentication information. (jing9) + + HDFS-4971. Move IO operations out of locking in OpenFileCtx. (brandonli and + jing9) + OPTIMIZATIONS BUG FIXES @@ -440,6 +622,17 @@ Release 2.1.1-beta - UNRELEASED HDFS-5159. Secondary NameNode fails to checkpoint if error occurs downloading edits on first checkpoint. (atm) + HDFS-5192. NameNode may fail to start when + dfs.client.test.drop.namenode.response.number is set. (jing9) + + HDFS-5219. Add configuration keys for retry policy in WebHDFSFileSystem. + (Haohui Mai via jing9) + + HDFS-5231. Fix broken links in the document of HDFS Federation. (Haohui Mai + via jing9) + + HDFS-5249. Fix dumper thread which may die silently. (brandonli) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES @@ -893,6 +1086,9 @@ Release 2.1.0-beta - 2013-08-22 HDFS-5016. Deadlock in pipeline recovery causes Datanode to be marked dead. (suresh) + HDFS-5228. The RemoteIterator returned by DistributedFileSystem.listFiles + may throw NullPointerException. (szetszwo and cnauroth via szetszwo) + BREAKDOWN OF HDFS-347 SUBTASKS AND RELATED JIRAS HDFS-4353. Encapsulate connections to peers in Peer and PeerServer classes. @@ -3483,6 +3679,9 @@ Release 0.23.10 - UNRELEASED HDFS-5010. Reduce the frequency of getCurrentUser() calls from namenode (kihwal) + HDFS-5346. 
Avoid unnecessary call to getNumLiveDataNodes() for each block + during IBR processing (Ravi Prakash via kihwal) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml index 537dee79c4d..253dba85e25 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml @@ -36,7 +36,25 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> ${basedir}/../../../../../hadoop-common-project/hadoop-common/target + + + + + org.jboss.netty + netty + 3.2.4.Final + + + + + + org.jboss.netty + netty + compile + + commons-logging commons-logging diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index c37d86d7c51..f62b668175c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -525,14 +525,17 @@ public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode, int numResponseToDrop = conf.getInt( DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT); + NameNodeProxies.ProxyAndInfo proxyInfo = null; if (numResponseToDrop > 0) { // This case is used for testing. LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY + " is set to " + numResponseToDrop + ", this hacked client will proactively drop responses"); - NameNodeProxies.ProxyAndInfo proxyInfo = NameNodeProxies - .createProxyWithLossyRetryHandler(conf, nameNodeUri, - ClientProtocol.class, numResponseToDrop); + proxyInfo = NameNodeProxies.createProxyWithLossyRetryHandler(conf, + nameNodeUri, ClientProtocol.class, numResponseToDrop); + } + + if (proxyInfo != null) { this.dtService = proxyInfo.getDelegationTokenService(); this.namenode = proxyInfo.getProxy(); } else if (rpcNamenode != null) { @@ -543,9 +546,8 @@ public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode, } else { Preconditions.checkArgument(nameNodeUri != null, "null URI"); - NameNodeProxies.ProxyAndInfo proxyInfo = - NameNodeProxies.createProxy(conf, nameNodeUri, ClientProtocol.class); - + proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri, + ClientProtocol.class); this.dtService = proxyInfo.getDelegationTokenService(); this.namenode = proxyInfo.getProxy(); } @@ -902,10 +904,15 @@ public Token getDelegationToken(Text renewer) assert dtService != null; Token token = namenode.getDelegationToken(renewer); - token.setService(this.dtService); - LOG.info("Created " + DelegationTokenIdentifier.stringifyToken(token)); + if (token != null) { + token.setService(this.dtService); + LOG.info("Created " + DelegationTokenIdentifier.stringifyToken(token)); + } else { + LOG.info("Cannot get delegation token from " + renewer); + } return token; + } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 5d2b14c50e9..e9eb7be119a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -20,6 +20,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import 
org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault; /** * This class contains constants for configuration keys used @@ -198,7 +199,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final boolean DFS_DATANODE_SYNCONCLOSE_DEFAULT = false; public static final String DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY = "dfs.datanode.socket.reuse.keepalive"; public static final int DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT = 1000; - + + public static final String DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY = "dfs.namenode.datanode.registration.ip-hostname-check"; + public static final boolean DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_DEFAULT = true; + public static final String DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES = "dfs.namenode.list.cache.pools.num.responses"; public static final int DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT = 100; @@ -364,6 +368,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY = "dfs.block.access.token.lifetime"; public static final long DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT = 600L; + public static final String DFS_BLOCK_REPLICATOR_CLASSNAME_KEY = "dfs.block.replicator.classname"; + public static final Class DFS_BLOCK_REPLICATOR_CLASSNAME_DEFAULT = BlockPlacementPolicyDefault.class; public static final String DFS_REPLICATION_MAX_KEY = "dfs.replication.max"; public static final int DFS_REPLICATION_MAX_DEFAULT = 512; public static final String DFS_DF_INTERVAL_KEY = "dfs.df.interval"; @@ -534,4 +540,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys { // Timeout to wait for block receiver and responder thread to stop public static final String DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY = "dfs.datanode.xceiver.stop.timeout.millis"; public static final long DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT = 60000; + + // WebHDFS retry policy + public static final String DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY = "dfs.http.client.retry.policy.enabled"; + public static final boolean DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT = false; + public static final String DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY = "dfs.http.client.retry.policy.spec"; + public static final String DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT = "10000,6,60000,10"; //t1,n1,t2,n2,... 
+ public static final String DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY = "dfs.http.client.failover.max.attempts"; + public static final int DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT = 15; + public static final String DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY = "dfs.http.client.failover.sleep.base.millis"; + public static final int DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT = 500; + public static final String DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY = "dfs.http.client.failover.sleep.max.millis"; + public static final int DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT = 15000; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java index 74fcc6f14ea..49ecb268646 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java @@ -403,7 +403,7 @@ private synchronized LocatedBlock getBlockAt(long offset, //check offset if (offset < 0 || offset >= getFileLength()) { - throw new IOException("offset < 0 || offset > getFileLength(), offset=" + throw new IOException("offset < 0 || offset >= getFileLength(), offset=" + offset + ", updatePosition=" + updatePosition + ", locatedBlocks=" + locatedBlocks); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index 927c530556d..28332fa0bd3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -38,6 +38,7 @@ import java.util.List; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.CanSetDropBehind; @@ -85,7 +86,6 @@ import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.Time; -import org.mortbay.log.Log; import com.google.common.annotations.VisibleForTesting; import com.google.common.cache.CacheBuilder; @@ -141,7 +141,7 @@ public class DFSOutputStream extends FSOutputSummer private long bytesCurBlock = 0; // bytes writen in current block private int packetSize = 0; // write packet size, not including the header. private int chunksPerPacket = 0; - private volatile IOException lastException = null; + private final AtomicReference lastException = new AtomicReference(); private long artificialSlowdown = 0; private long lastFlushOffset = 0; // offset when flush was invoked //persist blocks on namenode @@ -809,8 +809,8 @@ private boolean processDatanodeError() throws IOException { if (++pipelineRecoveryCount > 5) { DFSClient.LOG.warn("Error recovering pipeline for writing " + block + ". Already retried 5 times for the same packet."); - lastException = new IOException("Failing write. Tried pipeline " + - "recovery 5 times without success."); + lastException.set(new IOException("Failing write. 
Tried pipeline " + + "recovery 5 times without success.")); streamerClosed = true; return false; } @@ -1000,8 +1000,8 @@ private boolean setupPipelineForAppendOrRecovery() throws IOException { } } if (nodes.length <= 1) { - lastException = new IOException("All datanodes " + pipelineMsg - + " are bad. Aborting..."); + lastException.set(new IOException("All datanodes " + pipelineMsg + + " are bad. Aborting...")); streamerClosed = true; return false; } @@ -1016,7 +1016,7 @@ private boolean setupPipelineForAppendOrRecovery() throws IOException { newnodes.length-errorIndex); nodes = newnodes; hasError = false; - lastException = null; + lastException.set(null); errorIndex = -1; } @@ -1060,7 +1060,7 @@ private DatanodeInfo[] nextBlockOutputStream(String client) throws IOException { ExtendedBlock oldBlock = block; do { hasError = false; - lastException = null; + lastException.set(null); errorIndex = -1; success = false; @@ -1275,9 +1275,7 @@ Token getBlockToken() { } private void setLastException(IOException e) { - if (lastException == null) { - lastException = e; - } + lastException.compareAndSet(null, e); } } @@ -1309,7 +1307,7 @@ static Socket createSocketForPipeline(final DatanodeInfo first, protected void checkClosed() throws IOException { if (closed) { - IOException e = lastException; + IOException e = lastException.get(); throw e != null ? e : new ClosedChannelException(); } } @@ -1465,6 +1463,7 @@ private void queueCurrentPacket() { private void waitAndQueueCurrentPacket() throws IOException { synchronized (dataQueue) { + try { // If queue is full, then wait till we have enough space while (!closed && dataQueue.size() + ackQueue.size() > MAX_PACKETS) { try { @@ -1483,6 +1482,8 @@ private void waitAndQueueCurrentPacket() throws IOException { } checkClosed(); queueCurrentPacket(); + } catch (ClosedChannelException e) { + } } } @@ -1726,7 +1727,7 @@ private void flushOrSync(boolean isSync, EnumSet syncFlags) DFSClient.LOG.warn("Error while syncing", e); synchronized (this) { if (!closed) { - lastException = new IOException("IOException flush:" + e); + lastException.set(new IOException("IOException flush:" + e)); closeThreads(true); } } @@ -1784,21 +1785,25 @@ private void waitForAckedSeqno(long seqno) throws IOException { if (DFSClient.LOG.isDebugEnabled()) { DFSClient.LOG.debug("Waiting for ack for: " + seqno); } - synchronized (dataQueue) { - while (!closed) { - checkClosed(); - if (lastAckedSeqno >= seqno) { - break; - } - try { - dataQueue.wait(1000); // when we receive an ack, we notify on dataQueue - } catch (InterruptedException ie) { - throw new InterruptedIOException( - "Interrupted while waiting for data to be acknowledged by pipeline"); + try { + synchronized (dataQueue) { + while (!closed) { + checkClosed(); + if (lastAckedSeqno >= seqno) { + break; + } + try { + dataQueue.wait(1000); // when we receive an ack, we notify on + // dataQueue + } catch (InterruptedException ie) { + throw new InterruptedIOException( + "Interrupted while waiting for data to be acknowledged by pipeline"); + } } } + checkClosed(); + } catch (ClosedChannelException e) { } - checkClosed(); } private synchronized void start() { @@ -1844,7 +1849,7 @@ private void closeThreads(boolean force) throws IOException { @Override public synchronized void close() throws IOException { if (closed) { - IOException e = lastException; + IOException e = lastException.getAndSet(null); if (e == null) return; else @@ -1872,6 +1877,7 @@ public synchronized void close() throws IOException { closeThreads(false); 
completeFile(lastBlock); dfsClient.endFileLease(src); + } catch (ClosedChannelException e) { } finally { closed = true; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index e3b61abc0bd..27c0059b791 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -38,6 +38,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.security.SecureRandom; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Comparator; @@ -593,6 +594,48 @@ public static Map> getHaNnRpcAddresses( Configuration conf) { return getAddresses(conf, null, DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY); } + + /** + * Returns list of InetSocketAddress corresponding to HA NN HTTP addresses from + * the configuration. + * + * @param conf configuration + * @return list of InetSocketAddresses + */ + public static Map> getHaNnHttpAddresses( + Configuration conf) { + return getAddresses(conf, null, DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY); + } + + /** + * Resolve an HDFS URL into real INetSocketAddress. It works like a DNS resolver + * when the URL points to an non-HA cluster. When the URL points to an HA + * cluster, the resolver further resolves the logical name (i.e., the authority + * in the URL) into real namenode addresses. + */ + public static InetSocketAddress[] resolve(URI uri, int schemeDefaultPort, + Configuration conf) throws IOException { + ArrayList ret = new ArrayList(); + + if (!HAUtil.isLogicalUri(conf, uri)) { + InetSocketAddress addr = NetUtils.createSocketAddr(uri.getAuthority(), + schemeDefaultPort); + ret.add(addr); + + } else { + Map> addresses = DFSUtil + .getHaNnHttpAddresses(conf); + + for (Map addrs : addresses.values()) { + for (InetSocketAddress addr : addrs.values()) { + ret.add(addr); + } + } + } + + InetSocketAddress[] r = new InetSocketAddress[ret.size()]; + return ret.toArray(r); + } /** * Returns list of InetSocketAddress corresponding to backup node rpc diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index a51c31116ab..d2d316834e4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -716,6 +716,7 @@ public FileStatus[] next(final FileSystem fs, final Path p) protected RemoteIterator listLocatedStatus(final Path p, final PathFilter filter) throws IOException { + final Path absF = fixRelativePart(p); return new RemoteIterator() { private DirectoryListing thisListing; private int i; @@ -725,7 +726,7 @@ protected RemoteIterator listLocatedStatus(final Path p, { // initializer // Fully resolve symlinks in path first to avoid additional resolution // round-trips as we fetch more batches of listings - src = getPathName(resolvePath(p)); + src = getPathName(resolvePath(absF)); // fetch the first batch of entries in the directory thisListing = dfs.listPaths(src, HdfsFileStatus.EMPTY_NAME, true); statistics.incrementReadOps(1); @@ -739,7 +740,7 @@ public boolean hasNext() throws IOException { while (curStat == null && hasNextNoFilter()) { LocatedFileStatus next = 
((HdfsLocatedFileStatus)thisListing.getPartialListing()[i++]) - .makeQualifiedLocated(getUri(), p); + .makeQualifiedLocated(getUri(), absF); if (filter.accept(next.getPath())) { curStat = next; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java index 9674b6d6f7b..7d53fb991d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java @@ -17,15 +17,9 @@ */ package org.apache.hadoop.hdfs; -import static org.apache.hadoop.hdfs.DFSConfigKeys.*; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.net.URI; -import java.net.URISyntaxException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Map; - +import com.google.common.base.Joiner; +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.HadoopIllegalArgumentException; @@ -41,11 +35,17 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; -import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HA_DT_SERVICE_PREFIX; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Map; + +import static org.apache.hadoop.hdfs.DFSConfigKeys.*; +import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HA_DT_SERVICE_PREFIX; public class HAUtil { @@ -265,10 +265,15 @@ public static void cloneDelegationTokenForLogicalUri( tokenSelector.selectToken(haService, ugi.getTokens()); if (haToken != null) { for (InetSocketAddress singleNNAddr : nnAddrs) { + // this is a minor hack to prevent physical HA tokens from being + // exposed to the user via UGI.getCredentials(), otherwise these + // cloned tokens may be inadvertently propagated to jobs Token specificToken = - new Token(haToken); + new Token.PrivateToken(haToken); SecurityUtil.setTokenService(specificToken, singleNNAddr); - ugi.addToken(specificToken); + Text alias = + new Text(HA_DT_SERVICE_PREFIX + "//" + specificToken.getService()); + ugi.addToken(alias, specificToken); LOG.debug("Mapped HA service delegation token for logical URI " + haUri + " to namenode " + singleNNAddr); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java index dd5e9c6daa0..361f6a0c462 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java @@ -94,7 +94,6 @@ public class HftpFileSystem extends FileSystem private URI hftpURI; protected URI nnUri; - protected URI nnSecureUri; public static final String HFTP_TIMEZONE = "UTC"; public static final String HFTP_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ssZ"; @@ -134,34 +133,33 @@ protected int getDefaultPort() { DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT); } - protected int getDefaultSecurePort() { - return 
getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, - DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT); - } - + /** + * We generate the address with one of the following ports, in + * order of preference. + * 1. Port from the hftp URI e.g. hftp://namenode:4000/ will return 4000. + * 2. Port configured via DFS_NAMENODE_HTTP_PORT_KEY + * 3. DFS_NAMENODE_HTTP_PORT_DEFAULT i.e. 50070. + * + * @param uri + * @return + */ protected InetSocketAddress getNamenodeAddr(URI uri) { // use authority so user supplied uri can override port return NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort()); } - protected InetSocketAddress getNamenodeSecureAddr(URI uri) { - // must only use the host and the configured https port - return NetUtils.createSocketAddrForHost(uri.getHost(), getDefaultSecurePort()); - } - protected URI getNamenodeUri(URI uri) { - return DFSUtil.createUri("http", getNamenodeAddr(uri)); - } - - protected URI getNamenodeSecureUri(URI uri) { - return DFSUtil.createUri("http", getNamenodeSecureAddr(uri)); + return DFSUtil.createUri(getUnderlyingProtocol(), getNamenodeAddr(uri)); } + /** + * See the documentation of {@Link #getNamenodeAddr(URI)} for the logic + * behind selecting the canonical service name. + * @return + */ @Override public String getCanonicalServiceName() { - // unlike other filesystems, hftp's service is the secure port, not the - // actual port in the uri - return SecurityUtil.buildTokenService(nnSecureUri).toString(); + return SecurityUtil.buildTokenService(nnUri).toString(); } @Override @@ -187,7 +185,6 @@ public void initialize(final URI name, final Configuration conf) setConf(conf); this.ugi = UserGroupInformation.getCurrentUser(); this.nnUri = getNamenodeUri(name); - this.nnSecureUri = getNamenodeSecureUri(name); try { this.hftpURI = new URI(name.getScheme(), name.getAuthority(), null, null, null); @@ -225,7 +222,7 @@ protected void initDelegationToken() throws IOException { protected Token selectDelegationToken( UserGroupInformation ugi) { - return hftpTokenSelector.selectToken(nnSecureUri, ugi.getTokens(), getConf()); + return hftpTokenSelector.selectToken(nnUri, ugi.getTokens(), getConf()); } @@ -234,6 +231,13 @@ public Token getRenewToken() { return renewToken; } + /** + * Return the underlying protocol that is used to talk to the namenode. + */ + protected String getUnderlyingProtocol() { + return "http"; + } + @Override public synchronized void setDelegationToken(Token token) { renewToken = token; @@ -257,7 +261,7 @@ public synchronized Token getDelegationToken(final String renewer return ugi.doAs(new PrivilegedExceptionAction>() { @Override public Token run() throws IOException { - final String nnHttpUrl = nnSecureUri.toString(); + final String nnHttpUrl = nnUri.toString(); Credentials c; try { c = DelegationTokenFetcher.getDTfromRemote(nnHttpUrl, renewer); @@ -301,7 +305,7 @@ public URI getUri() { * @throws IOException on error constructing the URL */ protected URL getNamenodeURL(String path, String query) throws IOException { - final URL url = new URL("http", nnUri.getHost(), + final URL url = new URL(getUnderlyingProtocol(), nnUri.getHost(), nnUri.getPort(), path + '?' 
+ query); if (LOG.isTraceEnabled()) { LOG.trace("url=" + url); @@ -703,17 +707,20 @@ public boolean isManaged(Token token) throws IOException { return true; } + protected String getUnderlyingProtocol() { + return "http"; + } + @SuppressWarnings("unchecked") @Override public long renew(Token token, Configuration conf) throws IOException { // update the kerberos credentials, if they are coming from a keytab UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab(); - // use http to renew the token InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token); return DelegationTokenFetcher.renewDelegationToken - (DFSUtil.createUri("http", serviceAddr).toString(), + (DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr).toString(), (Token) token); } @@ -723,10 +730,9 @@ public void cancel(Token token, Configuration conf) throws IOException { // update the kerberos credentials, if they are coming from a keytab UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab(); - // use http to cancel the token InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token); DelegationTokenFetcher.cancelDelegationToken - (DFSUtil.createUri("http", serviceAddr).toString(), + (DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr).toString(), (Token) token); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java index 6a3bdba593b..5f5c4836953 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java @@ -68,6 +68,14 @@ public String getScheme() { return "hsftp"; } + /** + * Return the underlying protocol that is used to talk to the namenode. + */ + @Override + protected String getUnderlyingProtocol() { + return "https"; + } + @Override public void initialize(URI name, Configuration conf) throws IOException { super.initialize(name, conf); @@ -134,24 +142,15 @@ private static void setupSsl(Configuration conf) throws IOException { @Override protected int getDefaultPort() { - return getDefaultSecurePort(); + return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, + DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT); } - @Override - protected InetSocketAddress getNamenodeSecureAddr(URI uri) { - return getNamenodeAddr(uri); - } - - @Override - protected URI getNamenodeUri(URI uri) { - return getNamenodeSecureUri(uri); - } - @Override protected HttpURLConnection openConnection(String path, String query) throws IOException { query = addDelegationTokenParam(query); - final URL url = new URL("https", nnUri.getHost(), + final URL url = new URL(getUnderlyingProtocol(), nnUri.getHost(), nnUri.getPort(), path + '?' 
+ query); HttpsURLConnection conn; conn = (HttpsURLConnection)connectionFactory.openConnection(url); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java index 41dac6a80f6..bff0284c74f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java @@ -158,8 +158,8 @@ public static ProxyAndInfo createProxy(Configuration conf, * Generate a dummy namenode proxy instance that utilizes our hacked * {@link LossyRetryInvocationHandler}. Proxy instance generated using this * method will proactively drop RPC responses. Currently this method only - * support HA setup. IllegalStateException will be thrown if the given - * configuration is not for HA. + * support HA setup. null will be returned if the given configuration is not + * for HA. * * @param config the configuration containing the required IPC * properties, client failover configurations, etc. @@ -168,7 +168,8 @@ public static ProxyAndInfo createProxy(Configuration conf, * @param xface the IPC interface which should be created * @param numResponseToDrop The number of responses to drop for each RPC call * @return an object containing both the proxy and the associated - * delegation token service it corresponds to + * delegation token service it corresponds to. Will return null of the + * given configuration does not support HA. * @throws IOException if there is an error creating the proxy */ @SuppressWarnings("unchecked") @@ -204,8 +205,9 @@ public static ProxyAndInfo createProxyWithLossyRetryHandler( Text dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri); return new ProxyAndInfo(proxy, dtService); } else { - throw new IllegalStateException("Currently creating proxy using " + + LOG.warn("Currently creating proxy using " + "LossyRetryInvocationHandler requires NN HA setup"); + return null; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java index 2a0578ca93f..9a012107b6d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java @@ -43,6 +43,7 @@ public class DatanodeID implements Comparable { private String storageID; // unique per cluster storageID private int xferPort; // data streaming port private int infoPort; // info server port + private int infoSecurePort; // info server port private int ipcPort; // IPC server port public DatanodeID(DatanodeID from) { @@ -51,6 +52,7 @@ public DatanodeID(DatanodeID from) { from.getStorageID(), from.getXferPort(), from.getInfoPort(), + from.getInfoSecurePort(), from.getIpcPort()); this.peerHostName = from.getPeerHostName(); } @@ -65,12 +67,13 @@ public DatanodeID(DatanodeID from) { * @param ipcPort ipc server port */ public DatanodeID(String ipAddr, String hostName, String storageID, - int xferPort, int infoPort, int ipcPort) { + int xferPort, int infoPort, int infoSecurePort, int ipcPort) { this.ipAddr = ipAddr; this.hostName = hostName; this.storageID = storageID; this.xferPort = xferPort; this.infoPort = infoPort; + this.infoSecurePort = infoSecurePort; this.ipcPort = ipcPort; } @@ -128,6 +131,13 @@ public String 
getInfoAddr() { return ipAddr + ":" + infoPort; } + /** + * @return IP:infoPort string + */ + public String getInfoSecureAddr() { + return ipAddr + ":" + infoSecurePort; + } + /** * @return hostname:xferPort */ @@ -179,6 +189,13 @@ public int getInfoPort() { return infoPort; } + /** + * @return infoSecurePort (the port at which the HTTPS server bound to) + */ + public int getInfoSecurePort() { + return infoSecurePort; + } + /** * @return ipcPort (the port at which the IPC server bound to) */ @@ -218,13 +235,14 @@ public void updateRegInfo(DatanodeID nodeReg) { peerHostName = nodeReg.getPeerHostName(); xferPort = nodeReg.getXferPort(); infoPort = nodeReg.getInfoPort(); + infoSecurePort = nodeReg.getInfoSecurePort(); ipcPort = nodeReg.getIpcPort(); } /** * Compare based on data transfer address. * - * @param that + * @param that datanode to compare with * @return as specified by Comparable */ @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java index dc12a924eaf..17636ed146b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java @@ -17,10 +17,6 @@ */ package org.apache.hadoop.hdfs.protocol; -import static org.apache.hadoop.hdfs.DFSUtil.percent2String; - -import java.util.Date; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -32,6 +28,10 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import java.util.Date; + +import static org.apache.hadoop.hdfs.DFSUtil.percent2String; + /** * This class extends the primary identifier of a Datanode with ephemeral * state, eg usage information, current administrative state, and the @@ -115,20 +115,23 @@ public DatanodeInfo(DatanodeID nodeID, String location, final long blockPoolUsed, final long cacheCapacity, final long cacheUsed, final long lastUpdate, final int xceiverCount, final AdminStates adminState) { - this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getStorageID(), nodeID.getXferPort(), - nodeID.getInfoPort(), nodeID.getIpcPort(), capacity, dfsUsed, remaining, - blockPoolUsed, cacheCapacity, cacheUsed, lastUpdate, xceiverCount, - location, adminState); + this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getStorageID(), + nodeID.getXferPort(), nodeID.getInfoPort(), nodeID.getInfoSecurePort(), + nodeID.getIpcPort(), capacity, dfsUsed, remaining, blockPoolUsed, + cacheCapacity, cacheUsed, lastUpdate, xceiverCount, location, + adminState); } /** Constructor */ public DatanodeInfo(final String ipAddr, final String hostName, - final String storageID, final int xferPort, final int infoPort, final int ipcPort, + final String storageID, final int xferPort, final int infoPort, + final int infoSecurePort, final int ipcPort, final long capacity, final long dfsUsed, final long remaining, final long blockPoolUsed, final long cacheCapacity, final long cacheUsed, final long lastUpdate, final int xceiverCount, final String networkLocation, final AdminStates adminState) { - super(ipAddr, hostName, storageID, xferPort, infoPort, ipcPort); + super(ipAddr, hostName, storageID, xferPort, infoPort, + infoSecurePort, ipcPort); this.capacity = capacity; this.dfsUsed = dfsUsed; this.remaining = 
remaining; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 4f9ce6c79aa..27586117937 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -228,7 +228,8 @@ public static NamenodeRegistration convert(NamenodeRegistrationProto reg) { // DatanodeId public static DatanodeID convert(DatanodeIDProto dn) { return new DatanodeID(dn.getIpAddr(), dn.getHostName(), dn.getStorageID(), - dn.getXferPort(), dn.getInfoPort(), dn.getIpcPort()); + dn.getXferPort(), dn.getInfoPort(), dn.hasInfoSecurePort() ? dn + .getInfoSecurePort() : 0, dn.getIpcPort()); } public static DatanodeIDProto convert(DatanodeID dn) { @@ -238,6 +239,7 @@ public static DatanodeIDProto convert(DatanodeID dn) { .setStorageID(dn.getStorageID()) .setXferPort(dn.getXferPort()) .setInfoPort(dn.getInfoPort()) + .setInfoSecurePort(dn.getInfoSecurePort()) .setIpcPort(dn.getIpcPort()).build(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java index f233d1f5f72..b2446cbb806 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType; import org.apache.hadoop.io.Text; +import org.apache.hadoop.ipc.RetriableException; import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.SecurityUtil; @@ -115,6 +116,24 @@ public byte[] retrievePassword( return super.retrievePassword(identifier); } + @Override + public byte[] retriableRetrievePassword(DelegationTokenIdentifier identifier) + throws InvalidToken, StandbyException, RetriableException, IOException { + namesystem.checkOperation(OperationCategory.READ); + try { + return super.retrievePassword(identifier); + } catch (InvalidToken it) { + if (namesystem.inTransitionToActive()) { + // if the namesystem is currently in the middle of transition to + // active state, let client retry since the corresponding editlog may + // have not been applied yet + throw new RetriableException(it); + } else { + throw it; + } + } + } + /** * Returns expiry time of a token given its identifier. 
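The retriableRetrievePassword() hunk above turns an InvalidToken seen during a transition to active into a RetriableException, because the edit-log record that created the token may simply not have been applied yet. The sketch below shows only that wrap-and-rethrow shape; TokenStore, lookup() and the exception classes here are invented stand-ins, not Hadoop classes.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

// Invented stand-ins showing the shape of retriableRetrievePassword(): an
// unknown token observed while still transitioning to active is reported as
// retriable rather than as invalid.
class InvalidTokenException extends IOException {
  InvalidTokenException(String msg) { super(msg); }
}

class RetriableLookupException extends IOException {
  RetriableLookupException(Throwable cause) { super(cause); }
}

class TokenStore {
  private final Map<String, byte[]> passwords = new HashMap<String, byte[]>();
  boolean transitioningToActive;

  byte[] lookup(String tokenId) throws IOException {
    byte[] pw = passwords.get(tokenId);
    if (pw != null) {
      return pw;
    }
    if (transitioningToActive) {
      // The edit log entry creating this token may not have been replayed yet.
      throw new RetriableLookupException(new InvalidTokenException(tokenId));
    }
    throw new InvalidTokenException(tokenId); // genuinely unknown
  }

  public static void main(String[] args) {
    TokenStore store = new TokenStore();
    store.transitioningToActive = true;
    try {
      store.lookup("token-1");
    } catch (IOException e) {
      // Prints RetriableLookupException: the client should simply retry.
      System.out.println(e.getClass().getSimpleName());
    }
  }
}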
* diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index 1ed0e1915f4..befdd90a79e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -506,7 +506,7 @@ private static class BalancerDatanode { final DatanodeInfo datanode; final double utilization; final long maxSize2Move; - protected long scheduledSize = 0L; + private long scheduledSize = 0L; // blocks being moved but not confirmed yet private List pendingBlocks = new ArrayList(MAX_NUM_CONCURRENT_MOVES); @@ -555,20 +555,35 @@ protected String getStorageID() { } /** Decide if still need to move more bytes */ - protected boolean hasSpaceForScheduling() { + protected synchronized boolean hasSpaceForScheduling() { return scheduledSize0 && + while(!isTimeUp && getScheduledSize()>0 && (!srcBlockList.isEmpty() || blocksToReceive>0)) { PendingBlockMove pendingBlock = chooseNextBlockToMove(); if (pendingBlock != null) { @@ -779,7 +795,7 @@ private void dispatchBlocks() { // in case no blocks can be moved for source node's task, // jump out of while-loop after 5 iterations. if (noPendingBlockIteration >= MAX_NO_PENDING_BLOCK_ITERATIONS) { - scheduledSize = 0; + setScheduledSize(0); } } @@ -992,7 +1008,7 @@ private long chooseNodes() { long bytesToMove = 0L; for (Source src : sources) { - bytesToMove += src.scheduledSize; + bytesToMove += src.getScheduledSize(); } return bytesToMove; } @@ -1093,7 +1109,7 @@ private synchronized void inc( long bytes ) { bytesMoved += bytes; } - private long get() { + private synchronized long get() { return bytesMoved; } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 53699fb2aad..35045548c1d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -26,6 +26,7 @@ import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; @@ -74,6 +75,7 @@ import org.apache.hadoop.net.Node; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.LightWeightGSet; import org.apache.hadoop.util.Time; import com.google.common.annotations.VisibleForTesting; @@ -89,9 +91,6 @@ public class BlockManager { static final Log LOG = LogFactory.getLog(BlockManager.class); public static final Log blockLog = NameNode.blockStateChangeLog; - /** Default load factor of map */ - public static final float DEFAULT_MAP_LOAD_FACTOR = 0.75f; - private static final String QUEUE_REASON_CORRUPT_STATE = "it has the wrong state or generation stamp"; @@ -243,7 +242,8 @@ public BlockManager(final Namesystem namesystem, final FSClusterStats stats, invalidateBlocks = new InvalidateBlocks(datanodeManager); // Compute the map capacity by allocating 2% of total memory - blocksMap = new BlocksMap(DEFAULT_MAP_LOAD_FACTOR); + blocksMap = new BlocksMap( + 
LightWeightGSet.computeCapacity(2.0, "BlocksMap")); blockplacement = BlockPlacementPolicy.getInstance( conf, stats, datanodeManager.getNetworkTopology()); pendingReplications = new PendingReplicationBlocks(conf.getInt( @@ -1256,22 +1256,19 @@ int computeReplicationWorkForBlocks(List> blocksToReplicate) { namesystem.writeUnlock(); } - HashMap excludedNodes - = new HashMap(); + final Set excludedNodes = new HashSet(); for(ReplicationWork rw : work){ // Exclude all of the containing nodes from being targets. // This list includes decommissioning or corrupt nodes. excludedNodes.clear(); for (DatanodeDescriptor dn : rw.containingNodes) { - excludedNodes.put(dn, dn); + excludedNodes.add(dn); } // choose replication targets: NOT HOLDING THE GLOBAL LOCK // It is costly to extract the filename for which chooseTargets is called, // so for now we pass in the block collection itself. - rw.targets = blockplacement.chooseTarget(rw.bc, - rw.additionalReplRequired, rw.srcNode, rw.liveReplicaNodes, - excludedNodes, rw.block.getNumBytes()); + rw.chooseTargets(blockplacement, excludedNodes); } namesystem.writeLock(); @@ -1378,12 +1375,12 @@ int computeReplicationWorkForBlocks(List> blocksToReplicate) { * * @throws IOException * if the number of targets < minimum replication. - * @see BlockPlacementPolicy#chooseTarget(String, int, DatanodeDescriptor, - * List, boolean, HashMap, long) + * @see BlockPlacementPolicy#chooseTarget(String, int, Node, + * List, boolean, Set, long) */ public DatanodeDescriptor[] chooseTarget(final String src, final int numOfReplicas, final DatanodeDescriptor client, - final HashMap excludedNodes, + final Set excludedNodes, final long blocksize, List favoredNodes) throws IOException { List favoredDatanodeDescriptors = getDatanodeDescriptors(favoredNodes); @@ -1783,6 +1780,14 @@ private void processFirstBlockReport(final DatanodeDescriptor node, if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) { ((BlockInfoUnderConstruction)storedBlock).addReplicaIfNotPresent( node, iblk, reportedState); + // OpenFileBlocks only inside snapshots also will be added to safemode + // threshold. 
So we need to update such blocks to safemode + // refer HDFS-5283 + BlockInfoUnderConstruction blockUC = (BlockInfoUnderConstruction) storedBlock; + if (namesystem.isInSnapshot(blockUC)) { + int numOfReplicas = blockUC.getNumExpectedLocations(); + namesystem.incrementSafeBlockCount(numOfReplicas); + } //and fall through to next clause } //add replica if appropriate @@ -3256,6 +3261,13 @@ public ReplicationWork(Block block, this.priority = priority; this.targets = null; } + + private void chooseTargets(BlockPlacementPolicy blockplacement, + Set excludedNodes) { + targets = blockplacement.chooseTarget(bc.getName(), + additionalReplRequired, srcNode, liveReplicaNodes, false, + excludedNodes, block.getNumBytes()); + } } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java index 36a0b2a6c86..48f77298dbb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java @@ -19,14 +19,15 @@ import java.util.ArrayList; import java.util.Collection; -import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -51,25 +52,6 @@ public static class NotEnoughReplicasException extends Exception { } } - /** - * choose numOfReplicas data nodes for writer - * to re-replicate a block with size blocksize - * If not, return as many as we can. - * - * @param srcPath the file to which this chooseTargets is being invoked. - * @param numOfReplicas additional number of replicas wanted. - * @param writer the writer's machine, null if not in the cluster. - * @param chosenNodes datanodes that have been chosen as targets. - * @param blocksize size of the data to be written. - * @return array of DatanodeDescriptor instances chosen as target - * and sorted as a pipeline. - */ - abstract DatanodeDescriptor[] chooseTarget(String srcPath, - int numOfReplicas, - DatanodeDescriptor writer, - List chosenNodes, - long blocksize); - /** * choose numOfReplicas data nodes for writer * to re-replicate a block with size blocksize @@ -87,48 +69,22 @@ abstract DatanodeDescriptor[] chooseTarget(String srcPath, */ public abstract DatanodeDescriptor[] chooseTarget(String srcPath, int numOfReplicas, - DatanodeDescriptor writer, + Node writer, List chosenNodes, boolean returnChosenNodes, - HashMap excludedNodes, + Set excludedNodes, long blocksize); - - /** - * choose numOfReplicas data nodes for writer - * If not, return as many as we can. - * The base implemenatation extracts the pathname of the file from the - * specified srcBC, but this could be a costly operation depending on the - * file system implementation. Concrete implementations of this class should - * override this method to avoid this overhead. - * - * @param srcBC block collection of file for which chooseTarget is invoked. 
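Several hunks in this patch (BlockManager and the placement policies) replace the old HashMap excludedNodes, whose value was never used, with a Set, since Set.add() already reports whether the node was new. A tiny before/after illustration of the idiom, with String standing in for org.apache.hadoop.net.Node:

import java.util.HashSet;
import java.util.Set;

// Illustration only; the real code uses Node/DatanodeDescriptor instances.
class ExcludedNodesIdiom {
  public static void main(String[] args) {
    Set<String> excludedNodes = new HashSet<String>();

    // Old style used a HashMap<Node,Node> and tested put() for null:
    //   Node oldNode = excludedNodes.put(chosenNode, chosenNode);
    //   if (oldNode == null) { /* chosenNode was not excluded yet */ }

    // New style: Set.add() already reports whether the node was new.
    String chosenNode = "/rack1/dn1";
    if (excludedNodes.add(chosenNode)) {
      System.out.println(chosenNode + " was not in the excluded list");
    }
    if (!excludedNodes.add(chosenNode)) {
      System.out.println(chosenNode + " is already excluded");
    }
  }
}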
- * @param numOfReplicas additional number of replicas wanted. - * @param writer the writer's machine, null if not in the cluster. - * @param chosenNodes datanodes that have been chosen as targets. - * @param blocksize size of the data to be written. - * @return array of DatanodeDescriptor instances chosen as target - * and sorted as a pipeline. - */ - DatanodeDescriptor[] chooseTarget(BlockCollection srcBC, - int numOfReplicas, - DatanodeDescriptor writer, - List chosenNodes, - HashMap excludedNodes, - long blocksize) { - return chooseTarget(srcBC.getName(), numOfReplicas, writer, - chosenNodes, false, excludedNodes, blocksize); - } /** - * Same as {@link #chooseTarget(String, int, DatanodeDescriptor, List, boolean, - * HashMap, long)} with added parameter {@code favoredDatanodes} + * Same as {@link #chooseTarget(String, int, Node, List, boolean, + * Set, long)} with added parameter {@code favoredDatanodes} * @param favoredNodes datanodes that should be favored as targets. This * is only a hint and due to cluster state, namenode may not be * able to place the blocks on these datanodes. */ DatanodeDescriptor[] chooseTarget(String src, - int numOfReplicas, DatanodeDescriptor writer, - HashMap excludedNodes, + int numOfReplicas, Node writer, + Set excludedNodes, long blocksize, List favoredNodes) { // This class does not provide the functionality of placing // a block in favored datanodes. The implementations of this class @@ -183,7 +139,7 @@ abstract protected void initialize(Configuration conf, FSClusterStats stats, /** * Get an instance of the configured Block Placement Policy based on the - * value of the configuration paramater dfs.block.replicator.classname. + * the configuration property {@link DFS_BLOCK_REPLICATOR_CLASSNAME_KEY}. * * @param conf the configuration to be used * @param stats an object that is used to retrieve the load on the cluster @@ -193,12 +149,12 @@ abstract protected void initialize(Configuration conf, FSClusterStats stats, public static BlockPlacementPolicy getInstance(Configuration conf, FSClusterStats stats, NetworkTopology clusterMap) { - Class replicatorClass = - conf.getClass("dfs.block.replicator.classname", - BlockPlacementPolicyDefault.class, - BlockPlacementPolicy.class); - BlockPlacementPolicy replicator = (BlockPlacementPolicy) ReflectionUtils.newInstance( - replicatorClass, conf); + final Class replicatorClass = conf.getClass( + DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, + DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_DEFAULT, + BlockPlacementPolicy.class); + final BlockPlacementPolicy replicator = ReflectionUtils.newInstance( + replicatorClass, conf); replicator.initialize(conf, stats, clusterMap); return replicator; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java index fbb922351bf..7fb4cf01f7d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java @@ -21,8 +21,7 @@ import java.util.ArrayList; import java.util.Collection; -import java.util.HashMap; -import java.util.Iterator; +import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.TreeSet; @@ -57,6 +56,14 @@ public class 
BlockPlacementPolicyDefault extends BlockPlacementPolicy { "For more information, please enable DEBUG log level on " + BlockPlacementPolicy.class.getName(); + private static final ThreadLocal debugLoggingBuilder + = new ThreadLocal() { + @Override + protected StringBuilder initialValue() { + return new StringBuilder(); + } + }; + protected boolean considerLoad; private boolean preferLocalNode = true; protected NetworkTopology clusterMap; @@ -95,40 +102,25 @@ public void initialize(Configuration conf, FSClusterStats stats, DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT); } - protected ThreadLocal threadLocalBuilder = - new ThreadLocal() { - @Override - protected StringBuilder initialValue() { - return new StringBuilder(); - } - }; - @Override public DatanodeDescriptor[] chooseTarget(String srcPath, int numOfReplicas, - DatanodeDescriptor writer, - List chosenNodes, - long blocksize) { - return chooseTarget(numOfReplicas, writer, chosenNodes, false, - null, blocksize); - } - - @Override - public DatanodeDescriptor[] chooseTarget(String srcPath, - int numOfReplicas, - DatanodeDescriptor writer, + Node writer, List chosenNodes, boolean returnChosenNodes, - HashMap excludedNodes, + Set excludedNodes, long blocksize) { return chooseTarget(numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes, blocksize); } @Override - DatanodeDescriptor[] chooseTarget(String src, int numOfReplicas, - DatanodeDescriptor writer, HashMap excludedNodes, - long blocksize, List favoredNodes) { + DatanodeDescriptor[] chooseTarget(String src, + int numOfReplicas, + Node writer, + Set excludedNodes, + long blocksize, + List favoredNodes) { try { if (favoredNodes == null || favoredNodes.size() == 0) { // Favored nodes not specified, fall back to regular block placement. @@ -137,8 +129,8 @@ DatanodeDescriptor[] chooseTarget(String src, int numOfReplicas, excludedNodes, blocksize); } - HashMap favoriteAndExcludedNodes = excludedNodes == null ? - new HashMap() : new HashMap(excludedNodes); + Set favoriteAndExcludedNodes = excludedNodes == null ? + new HashSet() : new HashSet(excludedNodes); // Choose favored nodes List results = new ArrayList(); @@ -157,10 +149,10 @@ DatanodeDescriptor[] chooseTarget(String src, int numOfReplicas, + " with favored node " + favoredNode); continue; } - favoriteAndExcludedNodes.put(target, target); + favoriteAndExcludedNodes.add(target); } - if (results.size() < numOfReplicas) { + if (results.size() < numOfReplicas) { // Not enough favored nodes, choose other nodes. numOfReplicas -= results.size(); DatanodeDescriptor[] remainingTargets = @@ -181,18 +173,18 @@ DatanodeDescriptor[] chooseTarget(String src, int numOfReplicas, } /** This is the implementation. 
*/ - DatanodeDescriptor[] chooseTarget(int numOfReplicas, - DatanodeDescriptor writer, + private DatanodeDescriptor[] chooseTarget(int numOfReplicas, + Node writer, List chosenNodes, boolean returnChosenNodes, - HashMap excludedNodes, + Set excludedNodes, long blocksize) { if (numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) { - return new DatanodeDescriptor[0]; + return DatanodeDescriptor.EMPTY_ARRAY; } if (excludedNodes == null) { - excludedNodes = new HashMap(); + excludedNodes = new HashSet(); } int[] result = getMaxNodesPerRack(chosenNodes, numOfReplicas); @@ -204,16 +196,15 @@ DatanodeDescriptor[] chooseTarget(int numOfReplicas, for (DatanodeDescriptor node:chosenNodes) { // add localMachine and related nodes to excludedNodes addToExcludedNodes(node, excludedNodes); - adjustExcludedNodes(excludedNodes, node); } if (!clusterMap.contains(writer)) { - writer=null; + writer = null; } boolean avoidStaleNodes = (stats != null && stats.isAvoidingStaleDataNodesForWrite()); - DatanodeDescriptor localNode = chooseTarget(numOfReplicas, writer, + Node localNode = chooseTarget(numOfReplicas, writer, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes); if (!returnChosenNodes) { results.removeAll(chosenNodes); @@ -236,10 +227,20 @@ private int[] getMaxNodesPerRack(List chosenNodes, return new int[] {numOfReplicas, maxNodesPerRack}; } - /* choose numOfReplicas from all data nodes */ - private DatanodeDescriptor chooseTarget(int numOfReplicas, - DatanodeDescriptor writer, - HashMap excludedNodes, + /** + * choose numOfReplicas from all data nodes + * @param numOfReplicas additional number of replicas wanted + * @param writer the writer's machine, could be a non-DatanodeDescriptor node + * @param excludedNodes datanodes that should not be considered as targets + * @param blocksize size of the data to be written + * @param maxNodesPerRack max nodes allowed per rack + * @param results the target nodes already chosen + * @param avoidStaleNodes avoid stale nodes in replica choosing + * @return local node of writer (not chosen node) + */ + private Node chooseTarget(int numOfReplicas, + Node writer, + Set excludedNodes, long blocksize, int maxNodesPerRack, List results, @@ -251,13 +252,13 @@ private DatanodeDescriptor chooseTarget(int numOfReplicas, int numOfResults = results.size(); boolean newBlock = (numOfResults==0); - if (writer == null && !newBlock) { + if ((writer == null || !(writer instanceof DatanodeDescriptor)) && !newBlock) { writer = results.get(0); } // Keep a copy of original excludedNodes - final HashMap oldExcludedNodes = avoidStaleNodes ? - new HashMap(excludedNodes) : null; + final Set oldExcludedNodes = avoidStaleNodes ? + new HashSet(excludedNodes) : null; try { if (numOfResults == 0) { writer = chooseLocalNode(writer, excludedNodes, blocksize, @@ -304,7 +305,7 @@ private DatanodeDescriptor chooseTarget(int numOfReplicas, // We need to additionally exclude the nodes that were added to the // result list in the successful calls to choose*() above. for (Node node : results) { - oldExcludedNodes.put(node, node); + oldExcludedNodes.add(node); } // Set numOfReplicas, since it can get out of sync with the result list // if the NotEnoughReplicasException was thrown in chooseRandom(). @@ -316,33 +317,30 @@ private DatanodeDescriptor chooseTarget(int numOfReplicas, return writer; } - /* choose localMachine as the target. + /** + * Choose localMachine as the target. 
* if localMachine is not available, * choose a node on the same rack * @return the chosen node */ - protected DatanodeDescriptor chooseLocalNode( - DatanodeDescriptor localMachine, - HashMap excludedNodes, + protected DatanodeDescriptor chooseLocalNode(Node localMachine, + Set excludedNodes, long blocksize, int maxNodesPerRack, List results, boolean avoidStaleNodes) - throws NotEnoughReplicasException { + throws NotEnoughReplicasException { // if no local machine, randomly choose one node if (localMachine == null) return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes); - if (preferLocalNode) { + if (preferLocalNode && localMachine instanceof DatanodeDescriptor) { + DatanodeDescriptor localDatanode = (DatanodeDescriptor) localMachine; // otherwise try local machine first - Node oldNode = excludedNodes.put(localMachine, localMachine); - if (oldNode == null) { // was not in the excluded list - if (isGoodTarget(localMachine, blocksize, maxNodesPerRack, false, - results, avoidStaleNodes)) { - results.add(localMachine); - // add localMachine and related nodes to excludedNode - addToExcludedNodes(localMachine, excludedNodes); - return localMachine; + if (excludedNodes.add(localMachine)) { // was not in the excluded list + if (addIfIsGoodTarget(localDatanode, excludedNodes, blocksize, + maxNodesPerRack, false, results, avoidStaleNodes) >= 0) { + return localDatanode; } } } @@ -358,26 +356,25 @@ protected DatanodeDescriptor chooseLocalNode( * @return number of new excluded nodes */ protected int addToExcludedNodes(DatanodeDescriptor localMachine, - HashMap excludedNodes) { - Node node = excludedNodes.put(localMachine, localMachine); - return node == null?1:0; + Set excludedNodes) { + return excludedNodes.add(localMachine) ? 1 : 0; } - /* choose one node from the rack that localMachine is on. + /** + * Choose one node from the rack that localMachine is on. * if no such node is available, choose one node from the rack where * a second replica is on. * if still no such node is available, choose a random node * in the cluster. * @return the chosen node */ - protected DatanodeDescriptor chooseLocalRack( - DatanodeDescriptor localMachine, - HashMap excludedNodes, + protected DatanodeDescriptor chooseLocalRack(Node localMachine, + Set excludedNodes, long blocksize, int maxNodesPerRack, List results, boolean avoidStaleNodes) - throws NotEnoughReplicasException { + throws NotEnoughReplicasException { // no local machine, so choose a random machine if (localMachine == null) { return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize, @@ -391,9 +388,7 @@ protected DatanodeDescriptor chooseLocalRack( } catch (NotEnoughReplicasException e1) { // find the second replica DatanodeDescriptor newLocal=null; - for(Iterator iter=results.iterator(); - iter.hasNext();) { - DatanodeDescriptor nextNode = iter.next(); + for(DatanodeDescriptor nextNode : results) { if (nextNode != localMachine) { newLocal = nextNode; break; @@ -416,7 +411,8 @@ protected DatanodeDescriptor chooseLocalRack( } } - /* choose numOfReplicas nodes from the racks + /** + * Choose numOfReplicas nodes from the racks * that localMachine is NOT on. 
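In the rewritten chooseLocalNode() above, the writer parameter is now a generic Node, so it may be a client host that is not a datanode at all; only a writer that really is a DatanodeDescriptor is tried as the local replica, and anything else falls through to a random choice. A rough sketch of that guard follows, with invented types (ClusterNode, DataNodeNode, ClientNode, LocalFirstChooser).

// Illustration of the writer-type guard; not the Hadoop classes themselves.
interface ClusterNode { String getNetworkLocation(); }

class DataNodeNode implements ClusterNode {
  private final String location;
  DataNodeNode(String location) { this.location = location; }
  public String getNetworkLocation() { return location; }
}

class ClientNode implements ClusterNode {
  public String getNetworkLocation() { return "/default-rack"; }
}

class LocalFirstChooser {
  static ClusterNode pickLocalOrRandom(ClusterNode writer) {
    if (writer instanceof DataNodeNode) {
      // Writer runs on a datanode: prefer it, as chooseLocalNode() does.
      return writer;
    }
    // Writer is not a datanode (e.g. an external client): fall back to a
    // random pick, which in the real code is chooseRandom(NodeBase.ROOT, ...).
    return new DataNodeNode("/rack0");
  }

  public static void main(String[] args) {
    System.out.println(pickLocalOrRandom(new DataNodeNode("/rack1")).getNetworkLocation());
    System.out.println(pickLocalOrRandom(new ClientNode()).getNetworkLocation());
  }
}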
* if not enough nodes are available, choose the remaining ones * from the local rack @@ -424,12 +420,12 @@ protected DatanodeDescriptor chooseLocalRack( protected void chooseRemoteRack(int numOfReplicas, DatanodeDescriptor localMachine, - HashMap excludedNodes, + Set excludedNodes, long blocksize, int maxReplicasPerRack, List results, boolean avoidStaleNodes) - throws NotEnoughReplicasException { + throws NotEnoughReplicasException { int oldNumOfReplicas = results.size(); // randomly choose one node from remote racks try { @@ -443,91 +439,58 @@ protected void chooseRemoteRack(int numOfReplicas, } } - /* Randomly choose one target from nodes. - * @return the chosen node + /** + * Randomly choose one target from the given scope. + * @return the chosen node, if there is any. */ - protected DatanodeDescriptor chooseRandom( - String nodes, - HashMap excludedNodes, - long blocksize, - int maxNodesPerRack, - List results, - boolean avoidStaleNodes) - throws NotEnoughReplicasException { - int numOfAvailableNodes = - clusterMap.countNumOfAvailableNodes(nodes, excludedNodes.keySet()); - StringBuilder builder = null; - if (LOG.isDebugEnabled()) { - builder = threadLocalBuilder.get(); - builder.setLength(0); - builder.append("["); - } - boolean badTarget = false; - while(numOfAvailableNodes > 0) { - DatanodeDescriptor chosenNode = - (DatanodeDescriptor)(clusterMap.chooseRandom(nodes)); - - Node oldNode = excludedNodes.put(chosenNode, chosenNode); - if (oldNode == null) { // chosenNode was not in the excluded list - numOfAvailableNodes--; - if (isGoodTarget(chosenNode, blocksize, - maxNodesPerRack, results, avoidStaleNodes)) { - results.add(chosenNode); - // add chosenNode and related nodes to excludedNode - addToExcludedNodes(chosenNode, excludedNodes); - adjustExcludedNodes(excludedNodes, chosenNode); - return chosenNode; - } else { - badTarget = true; - } - } - } - - String detail = enableDebugLogging; - if (LOG.isDebugEnabled()) { - if (badTarget && builder != null) { - detail = builder.append("]").toString(); - builder.setLength(0); - } else detail = ""; - } - throw new NotEnoughReplicasException(detail); + protected DatanodeDescriptor chooseRandom(String scope, + Set excludedNodes, + long blocksize, + int maxNodesPerRack, + List results, + boolean avoidStaleNodes) + throws NotEnoughReplicasException { + return chooseRandom(1, scope, excludedNodes, blocksize, maxNodesPerRack, + results, avoidStaleNodes); } - - /* Randomly choose numOfReplicas targets from nodes. + + /** + * Randomly choose numOfReplicas targets from the given scope. + * @return the first chosen node, if there is any. 
*/ - protected void chooseRandom(int numOfReplicas, - String nodes, - HashMap excludedNodes, + protected DatanodeDescriptor chooseRandom(int numOfReplicas, + String scope, + Set excludedNodes, long blocksize, int maxNodesPerRack, List results, boolean avoidStaleNodes) - throws NotEnoughReplicasException { + throws NotEnoughReplicasException { - int numOfAvailableNodes = - clusterMap.countNumOfAvailableNodes(nodes, excludedNodes.keySet()); + int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes( + scope, excludedNodes); StringBuilder builder = null; if (LOG.isDebugEnabled()) { - builder = threadLocalBuilder.get(); + builder = debugLoggingBuilder.get(); builder.setLength(0); builder.append("["); } boolean badTarget = false; + DatanodeDescriptor firstChosen = null; while(numOfReplicas > 0 && numOfAvailableNodes > 0) { DatanodeDescriptor chosenNode = - (DatanodeDescriptor)(clusterMap.chooseRandom(nodes)); - Node oldNode = excludedNodes.put(chosenNode, chosenNode); - if (oldNode == null) { + (DatanodeDescriptor)clusterMap.chooseRandom(scope); + if (excludedNodes.add(chosenNode)) { //was not in the excluded list numOfAvailableNodes--; - if (isGoodTarget(chosenNode, blocksize, - maxNodesPerRack, results, avoidStaleNodes)) { + int newExcludedNodes = addIfIsGoodTarget(chosenNode, excludedNodes, + blocksize, maxNodesPerRack, considerLoad, results, avoidStaleNodes); + if (newExcludedNodes >= 0) { numOfReplicas--; - results.add(chosenNode); - // add chosenNode and related nodes to excludedNode - int newExcludedNodes = addToExcludedNodes(chosenNode, excludedNodes); + if (firstChosen == null) { + firstChosen = chosenNode; + } numOfAvailableNodes -= newExcludedNodes; - adjustExcludedNodes(excludedNodes, chosenNode); } else { badTarget = true; } @@ -544,34 +507,44 @@ protected void chooseRandom(int numOfReplicas, } throw new NotEnoughReplicasException(detail); } - } - - /** - * After choosing a node to place replica, adjust excluded nodes accordingly. - * It should do nothing here as chosenNode is already put into exlcudeNodes, - * but it can be overridden in subclass to put more related nodes into - * excludedNodes. - * - * @param excludedNodes - * @param chosenNode - */ - protected void adjustExcludedNodes(HashMap excludedNodes, - Node chosenNode) { - // do nothing here. + + return firstChosen; } - /* judge if a node is a good target. - * return true if node has enough space, - * does not have too much load, and the rack does not have too many nodes + /** + * If the given node is a good target, add it to the result list and + * update the set of excluded nodes. + * @return -1 if the given is not a good target; + * otherwise, return the number of nodes added to excludedNodes set. 
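The new addIfIsGoodTarget() above folds "check the node, add it to the results, extend the excluded set" into one call whose return value is -1 for a rejected node and otherwise the number of nodes newly excluded, which chooseRandom() subtracts from its count of still-available nodes. A minimal sketch of that contract; CandidatePool and accept() are invented names.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Illustration of the return-value contract only, not the real policy code.
class CandidatePool {
  private final Set<String> excluded = new HashSet<String>();
  private final List<String> results = new ArrayList<String>();

  // Returns -1 if the node is rejected, otherwise the number of nodes newly
  // excluded because of this choice (here always 1; a topology-aware policy
  // may exclude a whole node group).
  int accept(String node, boolean isGoodTarget) {
    if (!isGoodTarget) {
      return -1;
    }
    results.add(node);
    return excluded.add(node) ? 1 : 0;
  }

  public static void main(String[] args) {
    CandidatePool pool = new CandidatePool();
    int available = 3;
    int added = pool.accept("dn1", true);
    if (added >= 0) {
      available -= added;       // mirrors numOfAvailableNodes -= newExcludedNodes
    }
    System.out.println("available=" + available);            // 2
    System.out.println("rejected=" + pool.accept("dn2", false)); // -1
  }
}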
*/ - private boolean isGoodTarget(DatanodeDescriptor node, - long blockSize, int maxTargetPerRack, - List results, - boolean avoidStaleNodes) { - return isGoodTarget(node, blockSize, maxTargetPerRack, this.considerLoad, - results, avoidStaleNodes); + int addIfIsGoodTarget(DatanodeDescriptor node, + Set excludedNodes, + long blockSize, + int maxNodesPerRack, + boolean considerLoad, + List results, + boolean avoidStaleNodes) { + if (isGoodTarget(node, blockSize, maxNodesPerRack, considerLoad, + results, avoidStaleNodes)) { + results.add(node); + // add node and related nodes to excludedNode + return addToExcludedNodes(node, excludedNodes); + } else { + return -1; + } } - + + private static void logNodeIsNotChosen(DatanodeDescriptor node, String reason) { + if (LOG.isDebugEnabled()) { + // build the error message for later use. + debugLoggingBuilder.get() + .append(node).append(": ") + .append("Node ").append(NodeBase.getPath(node)) + .append(" is not chosen because ") + .append(reason); + } + } + /** * Determine if a node is a good target. * @@ -588,28 +561,20 @@ private boolean isGoodTarget(DatanodeDescriptor node, * does not have too much load, * and the rack does not have too many nodes. */ - protected boolean isGoodTarget(DatanodeDescriptor node, + private boolean isGoodTarget(DatanodeDescriptor node, long blockSize, int maxTargetPerRack, boolean considerLoad, List results, boolean avoidStaleNodes) { // check if the node is (being) decommissed if (node.isDecommissionInProgress() || node.isDecommissioned()) { - if(LOG.isDebugEnabled()) { - threadLocalBuilder.get().append(node.toString()).append(": ") - .append("Node ").append(NodeBase.getPath(node)) - .append(" is not chosen because the node is (being) decommissioned "); - } + logNodeIsNotChosen(node, "the node is (being) decommissioned "); return false; } if (avoidStaleNodes) { if (node.isStale(this.staleInterval)) { - if (LOG.isDebugEnabled()) { - threadLocalBuilder.get().append(node.toString()).append(": ") - .append("Node ").append(NodeBase.getPath(node)) - .append(" is not chosen because the node is stale "); - } + logNodeIsNotChosen(node, "the node is stale "); return false; } } @@ -618,11 +583,7 @@ protected boolean isGoodTarget(DatanodeDescriptor node, (node.getBlocksScheduled() * blockSize); // check the remaining capacity of the target machine if (blockSize* HdfsConstants.MIN_BLOCKS_FOR_WRITE>remaining) { - if(LOG.isDebugEnabled()) { - threadLocalBuilder.get().append(node.toString()).append(": ") - .append("Node ").append(NodeBase.getPath(node)) - .append(" is not chosen because the node does not have enough space "); - } + logNodeIsNotChosen(node, "the node does not have enough space "); return false; } @@ -634,11 +595,7 @@ protected boolean isGoodTarget(DatanodeDescriptor node, avgLoad = (double)stats.getTotalLoad()/size; } if (node.getXceiverCount() > (2.0 * avgLoad)) { - if(LOG.isDebugEnabled()) { - threadLocalBuilder.get().append(node.toString()).append(": ") - .append("Node ").append(NodeBase.getPath(node)) - .append(" is not chosen because the node is too busy "); - } + logNodeIsNotChosen(node, "the node is too busy "); return false; } } @@ -646,31 +603,25 @@ protected boolean isGoodTarget(DatanodeDescriptor node, // check if the target rack has chosen too many nodes String rackname = node.getNetworkLocation(); int counter=1; - for(Iterator iter = results.iterator(); - iter.hasNext();) { - Node result = iter.next(); + for(Node result : results) { if (rackname.equals(result.getNetworkLocation())) { counter++; } } if 
(counter>maxTargetPerRack) { - if(LOG.isDebugEnabled()) { - threadLocalBuilder.get().append(node.toString()).append(": ") - .append("Node ").append(NodeBase.getPath(node)) - .append(" is not chosen because the rack has too many chosen nodes "); - } + logNodeIsNotChosen(node, "the rack has too many chosen nodes "); return false; } return true; } - /* Return a pipeline of nodes. + /** + * Return a pipeline of nodes. * The pipeline is formed finding a shortest path that * starts from the writer and traverses all nodes * This is basically a traveling salesman problem. */ - private DatanodeDescriptor[] getPipeline( - DatanodeDescriptor writer, + private DatanodeDescriptor[] getPipeline(Node writer, DatanodeDescriptor[] nodes) { if (nodes.length==0) return nodes; @@ -709,7 +660,7 @@ public int verifyBlockPlacement(String srcPath, int minRacks) { DatanodeInfo[] locs = lBlk.getLocations(); if (locs == null) - locs = new DatanodeInfo[0]; + locs = DatanodeDescriptor.EMPTY_ARRAY; int numRacks = clusterMap.getNumOfRacks(); if(numRacks <= 1) // only one rack return 0; @@ -724,24 +675,18 @@ public int verifyBlockPlacement(String srcPath, @Override public DatanodeDescriptor chooseReplicaToDelete(BlockCollection bc, - Block block, - short replicationFactor, - Collection first, - Collection second) { + Block block, short replicationFactor, + Collection first, + Collection second) { long oldestHeartbeat = now() - heartbeatInterval * tolerateHeartbeatMultiplier; DatanodeDescriptor oldestHeartbeatNode = null; long minSpace = Long.MAX_VALUE; DatanodeDescriptor minSpaceNode = null; - // pick replica from the first Set. If first is empty, then pick replicas - // from second set. - Iterator iter = pickupReplicaSet(first, second); - // Pick the node with the oldest heartbeat or with the least free space, // if all hearbeats are within the tolerable heartbeat interval - while (iter.hasNext() ) { - DatanodeDescriptor node = iter.next(); + for(DatanodeDescriptor node : pickupReplicaSet(first, second)) { long free = node.getRemaining(); long lastHeartbeat = node.getLastUpdate(); if(lastHeartbeat < oldestHeartbeat) { @@ -762,12 +707,10 @@ public DatanodeDescriptor chooseReplicaToDelete(BlockCollection bc, * replica while second set contains remaining replica nodes. * So pick up first set if not empty. If first is empty, then pick second. */ - protected Iterator pickupReplicaSet( + protected Collection pickupReplicaSet( Collection first, Collection second) { - Iterator iter = - first.isEmpty() ? second.iterator() : first.iterator(); - return iter; + return first.isEmpty() ? 
second : first; } @VisibleForTesting diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java index e98318b9783..1b8f916dd2c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java @@ -20,9 +20,9 @@ import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; @@ -64,8 +64,8 @@ public void initialize(Configuration conf, FSClusterStats stats, * @return the chosen node */ @Override - protected DatanodeDescriptor chooseLocalNode(DatanodeDescriptor localMachine, - HashMap excludedNodes, long blocksize, int maxNodesPerRack, + protected DatanodeDescriptor chooseLocalNode(Node localMachine, + Set excludedNodes, long blocksize, int maxNodesPerRack, List results, boolean avoidStaleNodes) throws NotEnoughReplicasException { // if no local machine, randomly choose one node @@ -73,18 +73,16 @@ protected DatanodeDescriptor chooseLocalNode(DatanodeDescriptor localMachine, return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes); - // otherwise try local machine first - Node oldNode = excludedNodes.put(localMachine, localMachine); - if (oldNode == null) { // was not in the excluded list - if (isGoodTarget(localMachine, blocksize, - maxNodesPerRack, false, results, avoidStaleNodes)) { - results.add(localMachine); - // Nodes under same nodegroup should be excluded. - addNodeGroupToExcludedNodes(excludedNodes, - localMachine.getNetworkLocation()); - return localMachine; + if (localMachine instanceof DatanodeDescriptor) { + DatanodeDescriptor localDataNode = (DatanodeDescriptor)localMachine; + // otherwise try local machine first + if (excludedNodes.add(localMachine)) { // was not in the excluded list + if (addIfIsGoodTarget(localDataNode, excludedNodes, blocksize, + maxNodesPerRack, false, results, avoidStaleNodes) >= 0) { + return localDataNode; + } } - } + } // try a node on local node group DatanodeDescriptor chosenNode = chooseLocalNodeGroup( @@ -98,26 +96,10 @@ protected DatanodeDescriptor chooseLocalNode(DatanodeDescriptor localMachine, blocksize, maxNodesPerRack, results, avoidStaleNodes); } - @Override - protected void adjustExcludedNodes(HashMap excludedNodes, - Node chosenNode) { - // as node-group aware implementation, it should make sure no two replica - // are placing on the same node group. - addNodeGroupToExcludedNodes(excludedNodes, chosenNode.getNetworkLocation()); - } - // add all nodes under specific nodegroup to excludedNodes. 
- private void addNodeGroupToExcludedNodes(HashMap excludedNodes, - String nodeGroup) { - List leafNodes = clusterMap.getLeaves(nodeGroup); - for (Node node : leafNodes) { - excludedNodes.put(node, node); - } - } - @Override - protected DatanodeDescriptor chooseLocalRack(DatanodeDescriptor localMachine, - HashMap excludedNodes, long blocksize, int maxNodesPerRack, + protected DatanodeDescriptor chooseLocalRack(Node localMachine, + Set excludedNodes, long blocksize, int maxNodesPerRack, List results, boolean avoidStaleNodes) throws NotEnoughReplicasException { // no local machine, so choose a random machine @@ -137,9 +119,7 @@ protected DatanodeDescriptor chooseLocalRack(DatanodeDescriptor localMachine, } catch (NotEnoughReplicasException e1) { // find the second replica DatanodeDescriptor newLocal=null; - for(Iterator iter=results.iterator(); - iter.hasNext();) { - DatanodeDescriptor nextNode = iter.next(); + for(DatanodeDescriptor nextNode : results) { if (nextNode != localMachine) { newLocal = nextNode; break; @@ -165,7 +145,7 @@ protected DatanodeDescriptor chooseLocalRack(DatanodeDescriptor localMachine, @Override protected void chooseRemoteRack(int numOfReplicas, - DatanodeDescriptor localMachine, HashMap excludedNodes, + DatanodeDescriptor localMachine, Set excludedNodes, long blocksize, int maxReplicasPerRack, List results, boolean avoidStaleNodes) throws NotEnoughReplicasException { int oldNumOfReplicas = results.size(); @@ -191,8 +171,8 @@ protected void chooseRemoteRack(int numOfReplicas, * @return the chosen node */ private DatanodeDescriptor chooseLocalNodeGroup( - NetworkTopologyWithNodeGroup clusterMap, DatanodeDescriptor localMachine, - HashMap excludedNodes, long blocksize, int maxNodesPerRack, + NetworkTopologyWithNodeGroup clusterMap, Node localMachine, + Set excludedNodes, long blocksize, int maxNodesPerRack, List results, boolean avoidStaleNodes) throws NotEnoughReplicasException { // no local machine, so choose a random machine @@ -209,9 +189,7 @@ private DatanodeDescriptor chooseLocalNodeGroup( } catch (NotEnoughReplicasException e1) { // find the second replica DatanodeDescriptor newLocal=null; - for(Iterator iter=results.iterator(); - iter.hasNext();) { - DatanodeDescriptor nextNode = iter.next(); + for(DatanodeDescriptor nextNode : results) { if (nextNode != localMachine) { newLocal = nextNode; break; @@ -248,14 +226,14 @@ protected String getRack(final DatanodeInfo cur) { * within the same nodegroup * @return number of new excluded nodes */ - protected int addToExcludedNodes(DatanodeDescriptor localMachine, - HashMap excludedNodes) { + @Override + protected int addToExcludedNodes(DatanodeDescriptor chosenNode, + Set excludedNodes) { int countOfExcludedNodes = 0; - String nodeGroupScope = localMachine.getNetworkLocation(); + String nodeGroupScope = chosenNode.getNetworkLocation(); List leafNodes = clusterMap.getLeaves(nodeGroupScope); for (Node leafNode : leafNodes) { - Node node = excludedNodes.put(leafNode, leafNode); - if (node == null) { + if (excludedNodes.add(leafNode)) { // not a existing node in excludedNodes countOfExcludedNodes++; } @@ -274,12 +252,12 @@ protected int addToExcludedNodes(DatanodeDescriptor localMachine, * If first is empty, then pick second. */ @Override - public Iterator pickupReplicaSet( + public Collection pickupReplicaSet( Collection first, Collection second) { // If no replica within same rack, return directly. 
if (first.isEmpty()) { - return second.iterator(); + return second; } // Split data nodes in the first set into two sets, // moreThanOne contains nodes on nodegroup with more than one replica @@ -312,9 +290,7 @@ public Iterator pickupReplicaSet( } } - Iterator iter = - moreThanOne.isEmpty() ? exactlyOne.iterator() : moreThanOne.iterator(); - return iter; + return moreThanOne.isEmpty()? exactlyOne : moreThanOne; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java index 1e454c9bc86..99dd965ef98 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java @@ -57,11 +57,11 @@ public void remove() { /** Constant {@link LightWeightGSet} capacity. */ private final int capacity; - private volatile GSet blocks; + private GSet blocks; - BlocksMap(final float loadFactor) { + BlocksMap(int capacity) { // Use 2% of total memory to size the GSet capacity - this.capacity = LightWeightGSet.computeCapacity(2.0, "BlocksMap"); + this.capacity = capacity; this.blocks = new LightWeightGSet(capacity); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java index 7c0eb79b0b4..f7b43e4e05b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java @@ -44,7 +44,8 @@ @InterfaceAudience.Private @InterfaceStability.Evolving public class DatanodeDescriptor extends DatanodeInfo { - + public static final DatanodeDescriptor[] EMPTY_ARRAY = {}; + // Stores status of decommissioning. // If node is not decommissioning, do not use this object for anything. 
public DecommissioningStatus decommissioningStatus = new DecommissioningStatus(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 00672acff50..2ecfde8c8b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -17,21 +17,9 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import static org.apache.hadoop.util.Time.now; - -import java.io.IOException; -import java.io.PrintWriter; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Comparator; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.NavigableMap; -import java.util.TreeMap; - +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.net.InetAddresses; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.HadoopIllegalArgumentException; @@ -41,13 +29,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.DatanodeID; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.*; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; -import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList; import org.apache.hadoop.hdfs.server.namenode.CachedBlock; @@ -57,34 +40,24 @@ import org.apache.hadoop.hdfs.server.namenode.HostFileManager.MutableEntrySet; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.Namesystem; -import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand; -import org.apache.hadoop.hdfs.server.protocol.BlockCommand; -import org.apache.hadoop.hdfs.server.protocol.BlockIdCommand; -import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand; +import org.apache.hadoop.hdfs.server.protocol.*; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; -import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; -import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; -import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; -import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException; -import org.apache.hadoop.hdfs.server.protocol.RegisterCommand; import org.apache.hadoop.hdfs.util.CyclicIteration; import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.net.CachedDNSToSwitchMapping; -import org.apache.hadoop.net.DNSToSwitchMapping; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.net.NetworkTopology; +import org.apache.hadoop.net.*; import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException; -import 
org.apache.hadoop.net.Node; -import org.apache.hadoop.net.NodeBase; -import org.apache.hadoop.net.ScriptBasedMapping; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.IntrusiveCollection; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Time; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.net.InetAddresses; +import java.io.IOException; +import java.io.PrintWriter; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.*; + +import static org.apache.hadoop.util.Time.now; /** * Manage datanodes, include decommission and other activities. @@ -131,6 +104,8 @@ public class DatanodeManager { private final int defaultInfoPort; + private final int defaultInfoSecurePort; + private final int defaultIpcPort; /** Read include/exclude files*/ @@ -170,6 +145,7 @@ public class DatanodeManager { */ private boolean hasClusterEverBeenMultiRack = false; + private final boolean checkIpHostnameInRegistration; /** * Whether we should tell datanodes what to cache in replies to * heartbeat messages. @@ -198,7 +174,10 @@ public class DatanodeManager { DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT)).getPort(); this.defaultInfoPort = NetUtils.createSocketAddr( conf.get(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, - DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT)).getPort(); + DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT)).getPort(); + this.defaultInfoSecurePort = NetUtils.createSocketAddr( + conf.get(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, + DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT)).getPort(); this.defaultIpcPort = NetUtils.createSocketAddr( conf.get(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort(); @@ -241,6 +220,12 @@ public class DatanodeManager { LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY + "=" + this.blockInvalidateLimit); + this.checkIpHostnameInRegistration = conf.getBoolean( + DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY, + DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_DEFAULT); + LOG.info(DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY + + "=" + checkIpHostnameInRegistration); + this.avoidStaleDataNodesForRead = conf.getBoolean( DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT); @@ -764,11 +749,13 @@ public void registerDatanode(DatanodeRegistration nodeReg) // Mostly called inside an RPC, update ip and peer hostname String hostname = dnAddress.getHostName(); String ip = dnAddress.getHostAddress(); - if (!isNameResolved(dnAddress)) { + if (checkIpHostnameInRegistration && !isNameResolved(dnAddress)) { // Reject registration of unresolved datanode to prevent performance // impact of repetitive DNS lookups later. 
- LOG.warn("Unresolved datanode registration from " + ip); - throw new DisallowedDatanodeException(nodeReg); + final String message = "hostname cannot be resolved (ip=" + + ip + ", hostname=" + hostname + ")"; + LOG.warn("Unresolved datanode registration: " + message); + throw new DisallowedDatanodeException(nodeReg, message); } // update node registration with the ip and hostname from rpc request nodeReg.setIpAddr(ip); @@ -897,7 +884,12 @@ nodes with its data cleared (or user can just remove the StorageID // If the network location is invalid, clear the cached mappings // so that we have a chance to re-add this DataNode with the // correct network location later. - dnsToSwitchMapping.reloadCachedMappings(); + List invalidNodeNames = new ArrayList(3); + // clear cache for nodes in IP or Hostname + invalidNodeNames.add(nodeReg.getIpAddr()); + invalidNodeNames.add(nodeReg.getHostName()); + invalidNodeNames.add(nodeReg.getPeerHostName()); + dnsToSwitchMapping.reloadCachedMappings(invalidNodeNames); throw e; } } @@ -1133,6 +1125,7 @@ private DatanodeID parseDNFromHostsEntry(String hostLine) { // The IP:port is sufficient for listing in a report dnId = new DatanodeID(hostStr, "", "", port, DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT, + DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT, DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT); } else { String ipAddr = ""; @@ -1143,6 +1136,7 @@ private DatanodeID parseDNFromHostsEntry(String hostLine) { } dnId = new DatanodeID(ipAddr, hostStr, "", port, DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT, + DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT, DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT); } return dnId; @@ -1190,7 +1184,7 @@ public List getDatanodeListForReport( new DatanodeDescriptor(new DatanodeID(entry.getIpAddress(), entry.getPrefix(), "", entry.getPort() == 0 ? defaultXferPort : entry.getPort(), - defaultInfoPort, defaultIpcPort)); + defaultInfoPort, defaultInfoSecurePort, defaultIpcPort)); dn.setLastUpdate(0); // Consider this node dead for reporting nodes.add(dn); } @@ -1209,17 +1203,17 @@ public List getDatanodeListForReport( /** * Checks if name resolution was successful for the given address. If IP * address and host name are the same, then it means name resolution has - * failed. As a special case, the loopback address is also considered + * failed. As a special case, local addresses are also considered * acceptable. This is particularly important on Windows, where 127.0.0.1 does * not resolve to "localhost". 
* * @param address InetAddress to check - * @return boolean true if name resolution successful or address is loopback + * @return boolean true if name resolution successful or address is local */ private static boolean isNameResolved(InetAddress address) { String hostname = address.getHostName(); String ip = address.getHostAddress(); - return !hostname.equals(ip) || address.isLoopbackAddress(); + return !hostname.equals(ip) || NetUtils.isLocalAddress(address); } private void setDatanodeDead(DatanodeDescriptor node) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java index 0b82c12b1bd..a34f2cf217a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java @@ -18,24 +18,7 @@ package org.apache.hadoop.hdfs.server.common; -import java.io.ByteArrayInputStream; -import java.io.DataInputStream; -import java.io.IOException; -import java.io.UnsupportedEncodingException; -import java.net.InetSocketAddress; -import java.net.Socket; -import java.net.URL; -import java.net.URLEncoder; -import java.util.Arrays; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.List; -import java.util.TreeSet; - -import javax.servlet.ServletContext; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.jsp.JspWriter; +import com.google.common.base.Charsets; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -47,13 +30,9 @@ import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.net.TcpPeerServer; -import org.apache.hadoop.hdfs.protocol.DatanodeID; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.hdfs.protocol.LocatedBlocks; -import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; +import org.apache.hadoop.hdfs.protocol.*; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; +import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; @@ -74,10 +53,22 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.VersionInfo; -import com.google.common.base.Charsets; +import javax.servlet.ServletContext; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.jsp.JspWriter; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.net.URL; +import java.net.URLEncoder; +import java.util.*; -import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER; import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER; +import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER; @InterfaceAudience.Private public class JspHelper { @@ -112,7 +103,7 @@ public int hashCode() { return super.hashCode(); } } 
- + // compare two records based on their frequency private static class NodeRecordComparator implements Comparator { @@ -126,6 +117,27 @@ public int compare(NodeRecord o1, NodeRecord o2) { return 0; } } + + /** + * A helper class that generates the correct URL for different schemes. + * + */ + public static final class Url { + public static String authority(String scheme, DatanodeID d) { + if (scheme.equals("http")) { + return d.getInfoAddr(); + } else if (scheme.equals("https")) { + return d.getInfoSecureAddr(); + } else { + throw new IllegalArgumentException("Unknown scheme:" + scheme); + } + } + + public static String url(String scheme, DatanodeID d) { + return scheme + "://" + authority(scheme, d); + } + } + public static DatanodeInfo bestNode(LocatedBlocks blks, Configuration conf) throws IOException { HashMap map = @@ -217,7 +229,7 @@ public static void streamBlockInAscii(InetSocketAddress addr, String poolId, offsetIntoBlock, amtToRead, true, "JspHelper", TcpPeerServer.peerFromSocketAndKey(s, encryptionKey), new DatanodeID(addr.getAddress().getHostAddress(), - addr.getHostName(), poolId, addr.getPort(), 0, 0), null, + addr.getHostName(), poolId, addr.getPort(), 0, 0, 0), null, null, null, false, CachingStrategy.newDefaultStrategy()); final byte[] buf = new byte[amtToRead]; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java index be4f9577baf..32f9d9ea3e9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java @@ -100,6 +100,7 @@ class BlockPoolSliceScanner { private long currentPeriodStart = Time.now(); private long bytesLeft = 0; // Bytes to scan in this period private long totalBytesToScan = 0; + private boolean isNewPeriod = true; private final LogFileHandler verificationLog; @@ -126,7 +127,10 @@ static class BlockScanInfo extends Block public int compare(BlockScanInfo left, BlockScanInfo right) { final long l = left.lastScanTime; final long r = right.lastScanTime; - return l < r? -1: l > r? 1: 0; + // If the scan times are equal, compare the blocks themselves, because // TreeMap uses the comparator (when available) to check for the existence // of the object. + return l < r? -1: l > r? 1: left.compareTo(right); } }; @@ -148,8 +152,6 @@ public int hashCode() { public boolean equals(Object that) { if (this == that) { return true; - } else if (that == null || !(that instanceof BlockScanInfo)) { - return false; } return super.equals(that); } @@ -539,10 +541,12 @@ private boolean assignInitialVerificationTimes() { entry.genStamp)); if (info != null) { if (processedBlocks.get(entry.blockId) == null) { - updateBytesLeft(-info.getNumBytes()); + if (isNewPeriod) { + updateBytesLeft(-info.getNumBytes()); + } processedBlocks.put(entry.blockId, 1); } - if (logIterator.isPrevious()) { + if (logIterator.isLastReadFromPrevious()) { // write the log entry to current file // so that the entry is preserved for later runs.
verificationLog.append(entry.verificationTime, entry.genStamp, @@ -557,6 +561,7 @@ private boolean assignInitialVerificationTimes() { } finally { IOUtils.closeStream(logIterator); } + isNewPeriod = false; } @@ -597,6 +602,7 @@ private synchronized void startNewPeriod() { // reset the byte counts : bytesLeft = totalBytesToScan; currentPeriodStart = Time.now(); + isNewPeriod = true; } private synchronized boolean workRemainingInCurrentPeriod() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 226eb97d5b3..fee84190486 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -18,67 +18,10 @@ package org.apache.hadoop.hdfs.server.datanode; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTPS_ENABLE_KEY; -import static org.apache.hadoop.util.ExitUtil.terminate; - -import java.io.BufferedOutputStream; -import java.io.ByteArrayInputStream; 
-import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.io.PrintStream; -import java.net.InetSocketAddress; -import java.net.Socket; -import java.net.SocketException; -import java.net.SocketTimeoutException; -import java.net.URI; -import java.net.UnknownHostException; -import java.nio.channels.ClosedByInterruptException; -import java.nio.channels.SocketChannel; -import java.security.PrivilegedExceptionAction; -import java.util.AbstractList; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.EnumSet; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicInteger; - +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Joiner; +import com.google.common.base.Preconditions; +import com.google.protobuf.BlockingService; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -95,37 +38,15 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.net.DomainPeerServer; import org.apache.hadoop.hdfs.net.TcpPeerServer; -import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; -import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; -import org.apache.hadoop.hdfs.protocol.DatanodeID; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.ExtendedBlock; -import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; -import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; -import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor; -import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; -import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; -import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; +import org.apache.hadoop.hdfs.protocol.*; +import org.apache.hadoop.hdfs.protocol.datatransfer.*; import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InterDatanodeProtocolService; -import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolPB; -import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolServerSideTranslatorPB; -import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolPB; -import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolServerSideTranslatorPB; -import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB; -import org.apache.hadoop.hdfs.protocolPB.PBHelper; -import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager; -import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; -import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; +import org.apache.hadoop.hdfs.protocolPB.*; +import 
org.apache.hadoop.hdfs.security.token.block.*; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode; -import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; -import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; @@ -140,11 +61,7 @@ import org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets; import org.apache.hadoop.hdfs.server.namenode.StreamFile; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; -import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; -import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; -import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; -import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; -import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; +import org.apache.hadoop.hdfs.server.protocol.*; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.hdfs.web.resources.Param; import org.apache.hadoop.http.HttpServer; @@ -166,22 +83,21 @@ import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.util.Daemon; -import org.apache.hadoop.util.DiskChecker; +import org.apache.hadoop.util.*; import org.apache.hadoop.util.DiskChecker.DiskErrorException; import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; -import org.apache.hadoop.util.GenericOptionsParser; -import org.apache.hadoop.util.JvmPauseMonitor; -import org.apache.hadoop.util.ServicePlugin; -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.VersionInfo; import org.mortbay.util.ajax.JSON; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.protobuf.BlockingService; +import java.io.*; +import java.net.*; +import java.nio.channels.ClosedByInterruptException; +import java.nio.channels.SocketChannel; +import java.security.PrivilegedExceptionAction; +import java.util.*; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.hadoop.hdfs.DFSConfigKeys.*; +import static org.apache.hadoop.util.ExitUtil.terminate; /********************************************************** * DataNode is a class (and program) that stores a set of @@ -263,6 +179,7 @@ public static InetSocketAddress createSocketAddr(String target) { private volatile boolean heartbeatsDisabledForTests = false; private DataStorage storage = null; private HttpServer infoServer = null; + private int infoSecurePort; DataNodeMetrics metrics; private InetSocketAddress streamingAddr; @@ -386,16 +303,13 @@ private void startInfoServer(Configuration conf) throws IOException { InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf); String infoHost = infoSocAddr.getHostName(); int tmpInfoPort = infoSocAddr.getPort(); - this.infoServer = (secureResources == null) - ? 
new HttpServer.Builder().setName("datanode") - .setBindAddress(infoHost).setPort(tmpInfoPort) - .setFindPort(tmpInfoPort == 0).setConf(conf) - .setACL(new AccessControlList(conf.get(DFS_ADMIN, " "))).build() - : new HttpServer.Builder().setName("datanode") - .setBindAddress(infoHost).setPort(tmpInfoPort) - .setFindPort(tmpInfoPort == 0).setConf(conf) - .setACL(new AccessControlList(conf.get(DFS_ADMIN, " "))) - .setConnector(secureResources.getListener()).build(); + HttpServer.Builder builder = new HttpServer.Builder().setName("datanode") + .setBindAddress(infoHost).setPort(tmpInfoPort) + .setFindPort(tmpInfoPort == 0).setConf(conf) + .setACL(new AccessControlList(conf.get(DFS_ADMIN, " "))); + this.infoServer = (secureResources == null) ? builder.build() : + builder.setConnector(secureResources.getListener()).build(); + LOG.info("Opened info server at " + infoHost + ":" + tmpInfoPort); if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) { boolean needClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, @@ -409,6 +323,7 @@ private void startInfoServer(Configuration conf) throws IOException { if(LOG.isDebugEnabled()) { LOG.debug("Datanode listening for SSL on " + secInfoSocAddr); } + infoSecurePort = secInfoSocAddr.getPort(); } this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class); this.infoServer.addInternalServlet(null, "/getFileChecksum/*", @@ -796,7 +711,8 @@ DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) { } DatanodeID dnId = new DatanodeID( streamingAddr.getAddress().getHostAddress(), hostName, - getStorageId(), getXferPort(), getInfoPort(), getIpcPort()); + getStorageId(), getXferPort(), getInfoPort(), + infoSecurePort, getIpcPort()); return new DatanodeRegistration(dnId, storageInfo, new ExportedBlockKeys(), VersionInfo.getVersion()); } @@ -894,7 +810,7 @@ void shutdownBlockPool(BPOfferService bpos) { * If this is the first block pool to register, this also initializes * the datanode-scoped storage. * - * @param nsInfo the handshake response from the NN. + * @param bpos Block pool offer service * @throws IOException if the NN is inconsistent with the local storage. */ void initBlockPool(BPOfferService bpos) throws IOException { @@ -1419,15 +1335,13 @@ private void transferBlock(ExtendedBlock block, DatanodeInfo xferTargets[]) int numTargets = xferTargets.length; if (numTargets > 0) { - if (LOG.isInfoEnabled()) { - StringBuilder xfersBuilder = new StringBuilder(); - for (int i = 0; i < numTargets; i++) { - xfersBuilder.append(xferTargets[i]); - xfersBuilder.append(" "); - } - LOG.info(bpReg + " Starting thread to transfer " + - block + " to " + xfersBuilder); + StringBuilder xfersBuilder = new StringBuilder(); + for (int i = 0; i < numTargets; i++) { + xfersBuilder.append(xferTargets[i]); + xfersBuilder.append(" "); } + LOG.info(bpReg + " Starting thread to transfer " + + block + " to " + xfersBuilder); new Daemon(new DataTransfer(xferTargets, block, BlockConstructionStage.PIPELINE_SETUP_CREATE, "")).start(); @@ -2351,6 +2265,13 @@ public int getInfoPort() { return infoServer.getPort(); } + /** + * @return the datanode's https port + */ + public int getInfoSecurePort() { + return infoSecurePort; + } + /** * Returned information is a JSON representation of a map with * name node host name as the key and block pool Id as the value. 
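The DatanodeJspHelper changes that follow replace the hard-coded "http://" prefix in JSP redirects with the scheme-aware JspHelper.Url helper introduced above, so that an https request is redirected to the datanode's secure info port (the new infoSecurePort carried by DatanodeID and DataNode). As a rough illustration of that pattern only, here is a minimal, self-contained sketch; the DnAddrs class and the sample port numbers are hypothetical stand-ins for DatanodeID and are not part of this patch.

// Minimal sketch of the scheme-aware URL construction used by JspHelper.Url.
// DnAddrs and the sample ports are hypothetical stand-ins for DatanodeID.
public class SchemeAwareUrlSketch {

  /** Hypothetical holder for a datanode's http and https info addresses. */
  static class DnAddrs {
    final String host;
    final int infoPort;        // plain HTTP info port
    final int infoSecurePort;  // HTTPS info port introduced by this change

    DnAddrs(String host, int infoPort, int infoSecurePort) {
      this.host = host;
      this.infoPort = infoPort;
      this.infoSecurePort = infoSecurePort;
    }
  }

  /** Pick the authority that matches the scheme of the incoming request. */
  static String authority(String scheme, DnAddrs d) {
    if ("http".equals(scheme)) {
      return d.host + ":" + d.infoPort;
    } else if ("https".equals(scheme)) {
      return d.host + ":" + d.infoSecurePort;
    }
    throw new IllegalArgumentException("Unknown scheme: " + scheme);
  }

  static String url(String scheme, DnAddrs d) {
    return scheme + "://" + authority(scheme, d);
  }

  public static void main(String[] args) {
    DnAddrs dn = new DnAddrs("dn1.example.com", 50075, 50475);
    System.out.println(url("http", dn));   // -> http://dn1.example.com:50075
    System.out.println(url("https", dn));  // -> https://dn1.example.com:50475
  }
}

Keeping the scheme of the original request in the generated redirect avoids silently downgrading an https browsing session to http, which is the motivation for the Url helper and the secure-port plumbing in this patch.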
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java index 639468bbc75..c931698a32a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java @@ -18,10 +18,9 @@ package org.apache.hadoop.hdfs.server.datanode; import java.io.File; +import java.io.FileNotFoundException; import java.io.IOException; -import java.net.InetAddress; import java.net.InetSocketAddress; -import java.net.URI; import java.net.URL; import java.net.URLEncoder; import java.security.PrivilegedExceptionAction; @@ -37,9 +36,9 @@ import org.apache.commons.lang.StringEscapeUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -47,20 +46,23 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.server.common.JspHelper; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; -import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer; import org.apache.hadoop.http.HtmlQuoting; -import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.ServletUtil; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.VersionInfo; +import com.google.common.base.Predicate; +import com.google.common.collect.Iterables; + @InterfaceAudience.Private public class DatanodeJspHelper { + private static final int PREV_BLOCK = -1; + private static final int NEXT_BLOCK = 1; + private static DFSClient getDFSClient(final UserGroupInformation user, final String addr, final Configuration conf @@ -143,10 +145,10 @@ static void generateDirectoryStructure(JspWriter out, out.print("Empty file"); } else { DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock, conf); - String fqdn = canonicalize(chosenNode.getIpAddr()); int datanodePort = chosenNode.getXferPort(); - String redirectLocation = HttpConfig.getSchemePrefix() + fqdn + ":" - + chosenNode.getInfoPort() + "/browseBlock.jsp?blockId=" + String redirectLocation = JspHelper.Url.url(req.getScheme(), + chosenNode) + + "/browseBlock.jsp?blockId=" + firstBlock.getBlock().getBlockId() + "&blockSize=" + firstBlock.getBlock().getNumBytes() + "&genstamp=" + firstBlock.getBlock().getGenerationStamp() + "&filename=" @@ -225,7 +227,7 @@ static void generateDirectoryStructure(JspWriter out, JspHelper.addTableFooter(out); } } - out.print("
Go back to DFS home"); dfs.close(); @@ -302,8 +304,7 @@ static void generateFileDetails(JspWriter out, Long.MAX_VALUE).getLocatedBlocks(); // Add the various links for looking at the file contents // URL for downloading the full file - String downloadUrl = HttpConfig.getSchemePrefix() + req.getServerName() + ":" - + req.getServerPort() + "/streamFile" + ServletUtil.encodePath(filename) + String downloadUrl = "/streamFile" + ServletUtil.encodePath(filename) + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr, true) + JspHelper.getDelegationTokenUrlParam(tokenString); out.print(""); @@ -319,8 +320,8 @@ static void generateFileDetails(JspWriter out, dfs.close(); return; } - String fqdn = canonicalize(chosenNode.getIpAddr()); - String tailUrl = HttpConfig.getSchemePrefix() + fqdn + ":" + chosenNode.getInfoPort() + + String tailUrl = "///" + JspHelper.Url.authority(req.getScheme(), chosenNode) + "/tail.jsp?filename=" + URLEncoder.encode(filename, "UTF-8") + "&namenodeInfoPort=" + namenodeInfoPort + "&chunkSizeToView=" + chunkSizeToView @@ -368,8 +369,7 @@ static void generateFileDetails(JspWriter out, for (int j = 0; j < locs.length; j++) { String datanodeAddr = locs[j].getXferAddr(); datanodePort = locs[j].getXferPort(); - fqdn = canonicalize(locs[j].getIpAddr()); - String blockUrl = HttpConfig.getSchemePrefix() + fqdn + ":" + locs[j].getInfoPort() + String blockUrl = "///" + JspHelper.Url.authority(req.getScheme(), locs[j]) + "/browseBlock.jsp?blockId=" + blockidstring + "&blockSize=" + blockSize + "&filename=" + URLEncoder.encode(filename, "UTF-8") @@ -380,7 +380,7 @@ static void generateFileDetails(JspWriter out, + JspHelper.getDelegationTokenUrlParam(tokenString) + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr); - String blockInfoUrl = HttpConfig.getSchemePrefix() + nnCanonicalName + ":" + String blockInfoUrl = "///" + nnCanonicalName + ":" + namenodeInfoPort + "/block_info_xml.jsp?blockId=" + blockidstring; out.print(" " @@ -391,7 +391,7 @@ static void generateFileDetails(JspWriter out, } out.println(""); out.print("
"); - out.print("
Go back to DFS home"); dfs.close(); @@ -491,9 +491,7 @@ static void generateFileChunks(JspWriter out, HttpServletRequest req, String parent = new File(filename).getParent(); JspHelper.printGotoForm(out, namenodeInfoPort, tokenString, parent, nnAddr); out.print("
"); - out.print("Advanced view/download options
"); out.print("
"); - // Determine the prev & next blocks - long nextStartOffset = 0; - long nextBlockSize = 0; - String nextBlockIdStr = null; - String nextGenStamp = null; - String nextHost = req.getServerName(); - int nextPort = req.getServerPort(); - int nextDatanodePort = datanodePort; - // determine data for the next link - if (startOffset + chunkSizeToView >= blockSize) { - // we have to go to the next block from this point onwards - List blocks = dfs.getNamenode().getBlockLocations(filename, 0, - Long.MAX_VALUE).getLocatedBlocks(); - for (int i = 0; i < blocks.size(); i++) { - if (blocks.get(i).getBlock().getBlockId() == blockId) { - if (i != blocks.size() - 1) { - LocatedBlock nextBlock = blocks.get(i + 1); - nextBlockIdStr = Long.toString(nextBlock.getBlock().getBlockId()); - nextGenStamp = Long.toString(nextBlock.getBlock() - .getGenerationStamp()); - nextStartOffset = 0; - nextBlockSize = nextBlock.getBlock().getNumBytes(); - DatanodeInfo d = JspHelper.bestNode(nextBlock, conf); - nextDatanodePort = d.getXferPort(); - nextHost = d.getIpAddr(); - nextPort = d.getInfoPort(); - } - } - } - } else { - // we are in the same block - nextBlockIdStr = blockId.toString(); - nextStartOffset = startOffset + chunkSizeToView; - nextBlockSize = blockSize; - nextGenStamp = genStamp.toString(); - } - String nextUrl = null; - if (nextBlockIdStr != null) { - nextUrl = HttpConfig.getSchemePrefix() + canonicalize(nextHost) + ":" + nextPort - + "/browseBlock.jsp?blockId=" + nextBlockIdStr - + "&blockSize=" + nextBlockSize - + "&startOffset=" + nextStartOffset - + "&genstamp=" + nextGenStamp - + "&filename=" + URLEncoder.encode(filename, "UTF-8") - + "&chunkSizeToView=" + chunkSizeToView - + "&datanodePort=" + nextDatanodePort - + "&namenodeInfoPort=" + namenodeInfoPort - + JspHelper.getDelegationTokenUrlParam(tokenString) - + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr); + String authority = req.getServerName() + ":" + req.getServerPort(); + String nextUrl = generateLinksForAdjacentBlock(NEXT_BLOCK, authority, + datanodePort, startOffset, chunkSizeToView, blockSize, blockId, + genStamp, dfs, filename, conf, req.getScheme(), tokenString, + namenodeInfoPort, nnAddr); + if (nextUrl != null) { out.print("View Next chunk  "); } - // determine data for the prev link - String prevBlockIdStr = null; - String prevGenStamp = null; - long prevStartOffset = 0; - long prevBlockSize = 0; - String prevHost = req.getServerName(); - int prevPort = req.getServerPort(); - int prevDatanodePort = datanodePort; - if (startOffset == 0) { - List blocks = dfs.getNamenode().getBlockLocations(filename, 0, - Long.MAX_VALUE).getLocatedBlocks(); - for (int i = 0; i < blocks.size(); i++) { - if (blocks.get(i).getBlock().getBlockId() == blockId) { - if (i != 0) { - LocatedBlock prevBlock = blocks.get(i - 1); - prevBlockIdStr = Long.toString(prevBlock.getBlock().getBlockId()); - prevGenStamp = Long.toString(prevBlock.getBlock() - .getGenerationStamp()); - prevStartOffset = prevBlock.getBlock().getNumBytes() - - chunkSizeToView; - if (prevStartOffset < 0) - prevStartOffset = 0; - prevBlockSize = prevBlock.getBlock().getNumBytes(); - DatanodeInfo d = JspHelper.bestNode(prevBlock, conf); - prevDatanodePort = d.getXferPort(); - prevHost = d.getIpAddr(); - prevPort = d.getInfoPort(); - } - } - } - } else { - // we are in the same block - prevBlockIdStr = blockId.toString(); - prevStartOffset = startOffset - chunkSizeToView; - if (prevStartOffset < 0) - prevStartOffset = 0; - prevBlockSize = blockSize; - prevGenStamp = genStamp.toString(); 
- } - String prevUrl = null; - if (prevBlockIdStr != null) { - prevUrl = HttpConfig.getSchemePrefix() + canonicalize(prevHost) + ":" + prevPort - + "/browseBlock.jsp?blockId=" + prevBlockIdStr - + "&blockSize=" + prevBlockSize - + "&startOffset=" + prevStartOffset - + "&filename=" + URLEncoder.encode(filename, "UTF-8") - + "&chunkSizeToView=" + chunkSizeToView - + "&genstamp=" + prevGenStamp - + "&datanodePort=" + prevDatanodePort - + "&namenodeInfoPort=" + namenodeInfoPort - + JspHelper.getDelegationTokenUrlParam(tokenString) - + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr); + String prevUrl = generateLinksForAdjacentBlock(PREV_BLOCK, authority, + datanodePort, startOffset, chunkSizeToView, blockSize, blockId, + genStamp, dfs, filename, conf, req.getScheme(), tokenString, + namenodeInfoPort, nnAddr); + if (prevUrl != null) { out.print("View Prev chunk  "); } + out.print("
"); out.print("