diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 83bb0abe6c3..b7b45d1fc21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
+import org.apache.hadoop.hdfs.server.namenode.ha.ReadOnly;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
@@ -128,6 +129,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
+ @ReadOnly(atimeAffected = true)
LocatedBlocks getBlockLocations(String src, long offset, long length)
throws IOException;
@@ -137,6 +139,7 @@ public interface ClientProtocol {
* @throws IOException
*/
@Idempotent
+ @ReadOnly
FsServerDefaults getServerDefaults() throws IOException;
/**
@@ -277,6 +280,7 @@ public interface ClientProtocol {
* @return All the in-use block storage policies currently.
*/
@Idempotent
+ @ReadOnly
BlockStoragePolicy[] getStoragePolicies() throws IOException;
/**
@@ -319,6 +323,7 @@ public interface ClientProtocol {
   * If file/dir src is not found
*/
@Idempotent
+ @ReadOnly
BlockStoragePolicy getStoragePolicy(String path) throws IOException;
/**
@@ -685,6 +690,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
+ @ReadOnly
DirectoryListing getListing(String src, byte[] startAfter,
boolean needLocation) throws IOException;
@@ -696,6 +702,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred.
*/
@Idempotent
+ @ReadOnly
SnapshottableDirectoryStatus[] getSnapshottableDirListing()
throws IOException;
@@ -776,6 +783,7 @@ public interface ClientProtocol {
*
*/
@Idempotent
+ @ReadOnly
long[] getStats() throws IOException;
/**
@@ -783,6 +791,7 @@ public interface ClientProtocol {
* in the filesystem.
*/
@Idempotent
+ @ReadOnly
ReplicatedBlockStats getReplicatedBlockStats() throws IOException;
/**
@@ -790,6 +799,7 @@ public interface ClientProtocol {
* in the filesystem.
*/
@Idempotent
+ @ReadOnly
ECBlockGroupStats getECBlockGroupStats() throws IOException;
/**
@@ -799,6 +809,7 @@ public interface ClientProtocol {
* otherwise all datanodes if type is ALL.
*/
@Idempotent
+ @ReadOnly
DatanodeInfo[] getDatanodeReport(HdfsConstants.DatanodeReportType type)
throws IOException;
@@ -806,6 +817,7 @@ public interface ClientProtocol {
* Get a report on the current datanode storages.
*/
@Idempotent
+ @ReadOnly
DatanodeStorageReport[] getDatanodeStorageReport(
HdfsConstants.DatanodeReportType type) throws IOException;
@@ -818,6 +830,7 @@ public interface ClientProtocol {
* a symlink.
*/
@Idempotent
+ @ReadOnly
long getPreferredBlockSize(String filename)
throws IOException;
@@ -971,6 +984,7 @@ public interface ClientProtocol {
* cookie returned from the previous call.
*/
@Idempotent
+ @ReadOnly
CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
throws IOException;
@@ -1006,6 +1020,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
+ @ReadOnly
HdfsFileStatus getFileInfo(String src) throws IOException;
/**
@@ -1020,6 +1035,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
+ @ReadOnly
boolean isFileClosed(String src) throws IOException;
/**
@@ -1036,6 +1052,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
+ @ReadOnly
HdfsFileStatus getFileLinkInfo(String src) throws IOException;
/**
@@ -1050,6 +1067,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
+ @ReadOnly
HdfsLocatedFileStatus getLocatedFileInfo(String src, boolean needBlockToken)
throws IOException;
@@ -1064,6 +1082,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
+ @ReadOnly
ContentSummary getContentSummary(String path) throws IOException;
/**
@@ -1176,6 +1195,7 @@ public interface ClientProtocol {
* or an I/O error occurred
*/
@Idempotent
+ @ReadOnly
String getLinkTarget(String path) throws IOException;
/**
@@ -1246,6 +1266,7 @@ public interface ClientProtocol {
* @throws IOException
*/
@Idempotent
+ @ReadOnly
DataEncryptionKey getDataEncryptionKey() throws IOException;
/**
@@ -1314,6 +1335,7 @@ public interface ClientProtocol {
* @throws IOException on error
*/
@Idempotent
+ @ReadOnly
SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot,
String fromSnapshot, String toSnapshot) throws IOException;
@@ -1341,6 +1363,7 @@ public interface ClientProtocol {
* @throws IOException on error
*/
@Idempotent
+ @ReadOnly
SnapshotDiffReportListing getSnapshotDiffReportListing(String snapshotRoot,
String fromSnapshot, String toSnapshot, byte[] startPath, int index)
throws IOException;
@@ -1387,6 +1410,7 @@ public interface ClientProtocol {
* @return A batch of CacheDirectiveEntry objects.
*/
@Idempotent
+ @ReadOnly
  BatchedEntries<CacheDirectiveEntry> listCacheDirectives(
      long prevId, CacheDirectiveInfo filter) throws IOException;
@@ -1428,6 +1452,7 @@ public interface ClientProtocol {
* @return A batch of CachePoolEntry objects.
*/
@Idempotent
+ @ReadOnly
  BatchedEntries<CachePoolEntry> listCachePools(String prevPool)
throws IOException;
@@ -1474,6 +1499,7 @@ public interface ClientProtocol {
* Gets the ACLs of files and directories.
*/
@Idempotent
+ @ReadOnly
AclStatus getAclStatus(String src) throws IOException;
/**
@@ -1487,6 +1513,7 @@ public interface ClientProtocol {
* Get the encryption zone for a path.
*/
@Idempotent
+ @ReadOnly
EncryptionZone getEZForPath(String src)
throws IOException;
@@ -1498,6 +1525,7 @@ public interface ClientProtocol {
* @return Batch of encryption zones.
*/
@Idempotent
+ @ReadOnly
  BatchedEntries<EncryptionZone> listEncryptionZones(
long prevId) throws IOException;
@@ -1522,6 +1550,7 @@ public interface ClientProtocol {
* @throws IOException
*/
@Idempotent
+ @ReadOnly
  BatchedEntries<ZoneReencryptionStatus> listReencryptionStatus(long prevId)
throws IOException;
@@ -1555,6 +1584,7 @@ public interface ClientProtocol {
* @throws IOException
*/
@Idempotent
+ @ReadOnly
  List<XAttr> getXAttrs(String src, List<XAttr> xAttrs)
throws IOException;
@@ -1570,6 +1600,7 @@ public interface ClientProtocol {
* @throws IOException
*/
@Idempotent
+ @ReadOnly
  List<XAttr> listXAttrs(String src)
throws IOException;
@@ -1604,6 +1635,7 @@ public interface ClientProtocol {
* @throws IOException see specific implementation
*/
@Idempotent
+ @ReadOnly
void checkAccess(String path, FsAction mode) throws IOException;
/**
@@ -1612,6 +1644,7 @@ public interface ClientProtocol {
* the starting point for the inotify event stream.
*/
@Idempotent
+ @ReadOnly
long getCurrentEditLogTxid() throws IOException;
/**
@@ -1619,6 +1652,7 @@ public interface ClientProtocol {
* transactions for txids equal to or greater than txid.
*/
@Idempotent
+ @ReadOnly
EventBatchList getEditsFromTxid(long txid) throws IOException;
/**
@@ -1676,6 +1710,7 @@ public interface ClientProtocol {
* @throws IOException
*/
@Idempotent
+ @ReadOnly
ErasureCodingPolicyInfo[] getErasureCodingPolicies() throws IOException;
/**
@@ -1684,6 +1719,7 @@ public interface ClientProtocol {
* @throws IOException
*/
@Idempotent
+ @ReadOnly
  Map<String, String> getErasureCodingCodecs() throws IOException;
/**
@@ -1694,6 +1730,7 @@ public interface ClientProtocol {
* @throws IOException
*/
@Idempotent
+ @ReadOnly
ErasureCodingPolicy getErasureCodingPolicy(String src) throws IOException;
/**
@@ -1705,6 +1742,11 @@ public interface ClientProtocol {
/**
* Get {@link QuotaUsage} rooted at the specified directory.
+ *
+ * Note: due to HDFS-6763, standby/observer doesn't keep up-to-date info
+ * about quota usage, and thus even though this is ReadOnly, it can only be
+ * directed to the active namenode.
+ *
* @param path The string representation of the path
*
* @throws AccessControlException permission denied
@@ -1714,6 +1756,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
+ @ReadOnly(activeOnly = true)
QuotaUsage getQuotaUsage(String path) throws IOException;
/**
@@ -1727,6 +1770,7 @@ public interface ClientProtocol {
*/
@Idempotent
@Deprecated
+ @ReadOnly
  BatchedEntries<OpenFileEntry> listOpenFiles(long prevId) throws IOException;
/**
@@ -1741,6 +1785,7 @@ public interface ClientProtocol {
* @throws IOException
*/
@Idempotent
+ @ReadOnly
  BatchedEntries<OpenFileEntry> listOpenFiles(long prevId,
      EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ReadOnly.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ReadOnly.java
new file mode 100644
index 00000000000..1782dcb6d84
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ReadOnly.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.ha;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Inherited;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Marker annotation used to annotate methods that are read-only.
+ */
+@Inherited
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.METHOD)
+@InterfaceStability.Evolving
+public @interface ReadOnly {
+ /**
+ * @return if true, the annotated method may update the last accessed time
+ * while performing its read, if access time is enabled.
+ */
+ boolean atimeAffected() default false;
+
+ /**
+ * @return if true, the target method should only be invoked on the active
+ * namenode. This applies to operations that need to access information that
+ * is only available on the active namenode.
+ */
+ boolean activeOnly() default false;
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java
new file mode 100644
index 00000000000..34e84fa4894
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.hdfs.server.namenode.ha.ReadOnly;
+import org.junit.Test;
+
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Testing class for {@link ReadOnly} annotation on {@link ClientProtocol}.
+ */
+public class TestReadOnly {
+ private static final Method[] ALL_METHODS = ClientProtocol.class.getMethods();
+  private static final Set<String> READONLY_METHOD_NAMES = new HashSet<>(
+ Arrays.asList(
+ "getBlockLocations",
+ "getServerDefaults",
+ "getStoragePolicies",
+ "getStoragePolicy",
+ "getListing",
+ "getSnapshottableDirListing",
+ "getPreferredBlockSize",
+ "listCorruptFileBlocks",
+ "getFileInfo",
+ "isFileClosed",
+ "getFileLinkInfo",
+ "getLocatedFileInfo",
+ "getContentSummary",
+ "getLinkTarget",
+ "getSnapshotDiffReport",
+ "getSnapshotDiffReportListing",
+ "listCacheDirectives",
+ "listCachePools",
+ "getAclStatus",
+ "getEZForPath",
+ "listEncryptionZones",
+ "listReencryptionStatus",
+ "getXAttrs",
+ "listXAttrs",
+ "checkAccess",
+ "getErasureCodingPolicies",
+ "getErasureCodingCodecs",
+ "getErasureCodingPolicy",
+ "listOpenFiles",
+ "getStats",
+ "getReplicatedBlockStats",
+ "getECBlockGroupStats",
+ "getDatanodeReport",
+ "getDatanodeStorageReport",
+ "getDataEncryptionKey",
+ "getCurrentEditLogTxid",
+ "getEditsFromTxid",
+ "getQuotaUsage"
+ )
+ );
+
+ @Test
+ public void testReadOnly() {
+ for (Method m : ALL_METHODS) {
+ boolean expected = READONLY_METHOD_NAMES.contains(m.getName());
+ checkIsReadOnly(m.getName(), expected);
+ }
+ }
+
+ private void checkIsReadOnly(String methodName, boolean expected) {
+ for (Method m : ALL_METHODS) {
+ // Note here we only check the FIRST result of overloaded methods
+ // with the same name. The assumption is that all these methods should
+ // share the same annotation.
+ if (m.getName().equals(methodName)) {
+        assertEquals("Expected ReadOnly for method '" + methodName +
+            "' to be " + expected,
+            expected, m.isAnnotationPresent(ReadOnly.class));
+ return;
+ }
+ }
+ throw new IllegalArgumentException("Unknown method name: " + methodName);
+ }
+
+}