diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index a9c8d7e130e..360dbdcde7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
+import org.apache.hadoop.hdfs.server.namenode.ha.ReadOnly;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
@@ -125,6 +126,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
+ @ReadOnly(atimeAffected = true)
LocatedBlocks getBlockLocations(String src, long offset, long length)
throws IOException;
@@ -134,6 +136,7 @@ public interface ClientProtocol {
* @throws IOException
*/
@Idempotent
+ @ReadOnly
FsServerDefaults getServerDefaults() throws IOException;
/**
@@ -266,6 +269,7 @@ public interface ClientProtocol {
* @return All block storage policies currently in use.
*/
@Idempotent
+ @ReadOnly
BlockStoragePolicy[] getStoragePolicies() throws IOException;
/**
@@ -308,6 +312,7 @@ public interface ClientProtocol {
* If file/dir src is not found
*/
@Idempotent
+ @ReadOnly
BlockStoragePolicy getStoragePolicy(String path) throws IOException;
/**
@@ -674,6 +679,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
+ @ReadOnly
DirectoryListing getListing(String src, byte[] startAfter,
boolean needLocation) throws IOException;
@@ -684,6 +690,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
+ @ReadOnly
SnapshottableDirectoryStatus[] getSnapshottableDirListing()
throws IOException;
@@ -755,6 +762,7 @@ public interface ClientProtocol {
* actual numbers to index into the array.
*/
@Idempotent
+ @ReadOnly
long[] getStats() throws IOException;
/**
@@ -764,6 +772,7 @@ public interface ClientProtocol {
* otherwise all datanodes if type is ALL.
*/
@Idempotent
+ @ReadOnly
DatanodeInfo[] getDatanodeReport(HdfsConstants.DatanodeReportType type)
throws IOException;
@@ -771,6 +780,7 @@ public interface ClientProtocol {
* Get a report on the current datanode storages.
*/
@Idempotent
+ @ReadOnly
DatanodeStorageReport[] getDatanodeStorageReport(
HdfsConstants.DatanodeReportType type) throws IOException;
@@ -783,6 +793,7 @@ public interface ClientProtocol {
* a symlink.
*/
@Idempotent
+ @ReadOnly
long getPreferredBlockSize(String filename)
throws IOException;
@@ -922,6 +933,7 @@ public interface ClientProtocol {
* cookie returned from the previous call.
*/
@Idempotent
+ @ReadOnly
CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
throws IOException;
@@ -957,6 +969,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
+ @ReadOnly
HdfsFileStatus getFileInfo(String src) throws IOException;
/**
@@ -971,6 +984,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
+ @ReadOnly
boolean isFileClosed(String src) throws IOException;
/**
@@ -987,6 +1001,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
+ @ReadOnly
HdfsFileStatus getFileLinkInfo(String src) throws IOException;
/**
@@ -1000,6 +1015,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
+ @ReadOnly
ContentSummary getContentSummary(String path) throws IOException;
/**
@@ -1112,6 +1128,7 @@ public interface ClientProtocol {
* or an I/O error occurred
*/
@Idempotent
+ @ReadOnly
String getLinkTarget(String path) throws IOException;
/**
@@ -1182,6 +1199,7 @@ public interface ClientProtocol {
* @throws IOException
*/
@Idempotent
+ @ReadOnly
DataEncryptionKey getDataEncryptionKey() throws IOException;
/**
@@ -1250,6 +1268,7 @@ public interface ClientProtocol {
* @throws IOException on error
*/
@Idempotent
+ @ReadOnly
SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot,
String fromSnapshot, String toSnapshot) throws IOException;
@@ -1295,6 +1314,7 @@ public interface ClientProtocol {
* @return A batch of CacheDirectiveEntry objects.
*/
@Idempotent
+ @ReadOnly
BatchedEntries<CacheDirectiveEntry> listCacheDirectives(
long prevId, CacheDirectiveInfo filter) throws IOException;
@@ -1336,6 +1356,7 @@ public interface ClientProtocol {
* @return A batch of CachePoolEntry objects.
*/
@Idempotent
+ @ReadOnly
BatchedEntries<CachePoolEntry> listCachePools(String prevPool)
throws IOException;
@@ -1382,6 +1403,7 @@ public interface ClientProtocol {
* Gets the ACLs of files and directories.
*/
@Idempotent
+ @ReadOnly
AclStatus getAclStatus(String src) throws IOException;
/**
@@ -1395,6 +1417,7 @@ public interface ClientProtocol {
* Get the encryption zone for a path.
*/
@Idempotent
+ @ReadOnly
EncryptionZone getEZForPath(String src)
throws IOException;
@@ -1406,6 +1429,7 @@ public interface ClientProtocol {
* @return Batch of encryption zones.
*/
@Idempotent
+ @ReadOnly
BatchedEntries<EncryptionZone> listEncryptionZones(
long prevId) throws IOException;
@@ -1439,6 +1463,7 @@ public interface ClientProtocol {
* @throws IOException
*/
@Idempotent
+ @ReadOnly
List<XAttr> getXAttrs(String src, List<XAttr> xAttrs)
throws IOException;
@@ -1454,6 +1479,7 @@ public interface ClientProtocol {
* @throws IOException
*/
@Idempotent
+ @ReadOnly
List<XAttr> listXAttrs(String src)
throws IOException;
@@ -1488,6 +1514,7 @@ public interface ClientProtocol {
* @throws IOException see specific implementation
*/
@Idempotent
+ @ReadOnly
void checkAccess(String path, FsAction mode) throws IOException;
/**
@@ -1496,6 +1523,7 @@ public interface ClientProtocol {
* the starting point for the inotify event stream.
*/
@Idempotent
+ @ReadOnly
long getCurrentEditLogTxid() throws IOException;
/**
@@ -1503,10 +1531,16 @@ public interface ClientProtocol {
* transactions for txids equal to or greater than txid.
*/
@Idempotent
+ @ReadOnly
EventBatchList getEditsFromTxid(long txid) throws IOException;
/**
* Get {@link QuotaUsage} rooted at the specified directory.
+ *
+ * Note: due to HDFS-6763, standby/observer namenodes do not keep up-to-date
+ * quota usage information, so even though this method is ReadOnly, it can
+ * only be directed to the active namenode.
+ *
* @param path The string representation of the path
*
* @throws AccessControlException permission denied
@@ -1516,6 +1550,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
@Idempotent
+ @ReadOnly(activeOnly = true)
QuotaUsage getQuotaUsage(String path) throws IOException;
/**
@@ -1528,5 +1563,6 @@ public interface ClientProtocol {
* @throws IOException
*/
@Idempotent
+ @ReadOnly
BatchedEntries<OpenFileEntry> listOpenFiles(long prevId) throws IOException;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ReadOnly.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ReadOnly.java
new file mode 100644
index 00000000000..1782dcb6d84
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ReadOnly.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.ha;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Inherited;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Marker annotation used to denote methods that are read-only.
+ */
+@Inherited
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.METHOD)
+@InterfaceStability.Evolving
+public @interface ReadOnly {
+ /**
+ * @return if true, the annotated method may update the last accessed time
+ * while performing its read, if access time is enabled.
+ */
+ boolean atimeAffected() default false;
+
+ /**
+ * @return if true, the target method should only be invoked on the active
+ * namenode. This applies to operations that need to access information that
+ * is only available on the active namenode.
+ */
+ boolean activeOnly() default false;
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java
new file mode 100644
index 00000000000..34e84fa4894
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.hdfs.server.namenode.ha.ReadOnly;
+import org.junit.Test;
+
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Test class for the {@link ReadOnly} annotation on {@link ClientProtocol}.
+ */
+public class TestReadOnly {
+ private static final Method[] ALL_METHODS = ClientProtocol.class.getMethods();
+ private static final Set<String> READONLY_METHOD_NAMES = new HashSet<>(
+ Arrays.asList(
+ "getBlockLocations",
+ "getServerDefaults",
+ "getStoragePolicies",
+ "getStoragePolicy",
+ "getListing",
+ "getSnapshottableDirListing",
+ "getPreferredBlockSize",
+ "listCorruptFileBlocks",
+ "getFileInfo",
+ "isFileClosed",
+ "getFileLinkInfo",
+ "getLocatedFileInfo",
+ "getContentSummary",
+ "getLinkTarget",
+ "getSnapshotDiffReport",
+ "getSnapshotDiffReportListing",
+ "listCacheDirectives",
+ "listCachePools",
+ "getAclStatus",
+ "getEZForPath",
+ "listEncryptionZones",
+ "listReencryptionStatus",
+ "getXAttrs",
+ "listXAttrs",
+ "checkAccess",
+ "getErasureCodingPolicies",
+ "getErasureCodingCodecs",
+ "getErasureCodingPolicy",
+ "listOpenFiles",
+ "getStats",
+ "getReplicatedBlockStats",
+ "getECBlockGroupStats",
+ "getDatanodeReport",
+ "getDatanodeStorageReport",
+ "getDataEncryptionKey",
+ "getCurrentEditLogTxid",
+ "getEditsFromTxid",
+ "getQuotaUsage"
+ )
+ );
+
+ @Test
+ public void testReadOnly() {
+ for (Method m : ALL_METHODS) {
+ boolean expected = READONLY_METHOD_NAMES.contains(m.getName());
+ checkIsReadOnly(m.getName(), expected);
+ }
+ }
+
+ private void checkIsReadOnly(String methodName, boolean expected) {
+ for (Method m : ALL_METHODS) {
+ // Note here we only check the FIRST result of overloaded methods
+ // with the same name. The assumption is that all these methods should
+ // share the same annotation.
+ if (m.getName().equals(methodName)) {
+ assertEquals("Expected ReadOnly for method '" + methodName +
+ "' to be " + expected,
+ expected, m.isAnnotationPresent(ReadOnly.class));
+ return;
+ }
+ }
+ throw new IllegalArgumentException("Unknown method name: " + methodName);
+ }
+
+}