diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
index db7f6a91f25..29581109328 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ha;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.KerberosInfo;
 
@@ -106,6 +107,7 @@ public interface HAServiceProtocol {
    * @throws IOException
    *           if other errors happen
    */
+  @Idempotent
   public void monitorHealth() throws HealthCheckFailedException,
                                      AccessControlException,
                                      IOException;
@@ -121,6 +123,7 @@ public interface HAServiceProtocol {
    * @throws IOException
    *           if other errors happen
    */
+  @Idempotent
   public void transitionToActive(StateChangeRequestInfo reqInfo)
                                    throws ServiceFailedException,
                                           AccessControlException,
@@ -137,6 +140,7 @@ public interface HAServiceProtocol {
    * @throws IOException
    *           if other errors happen
    */
+  @Idempotent
   public void transitionToStandby(StateChangeRequestInfo reqInfo)
                                     throws ServiceFailedException,
                                            AccessControlException,
@@ -152,6 +156,7 @@ public interface HAServiceProtocol {
    * @throws IOException
    *           if other errors happen
    */
+  @Idempotent
   public HAServiceStatus getServiceStatus() throws AccessControlException,
                                                    IOException;
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AtMostOnce.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AtMostOnce.java
new file mode 100644
index 00000000000..624bb43ab7c
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AtMostOnce.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.retry;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Inherited;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Used to mark certain methods of an interface with at-most-once semantics.
+ *
+ * The server must guarantee that methods are executed at most once, by keeping
+ * a retry cache. The previous response must be returned when duplicate
+ * requests are received. Because of this guarantee, a client can retry
+ * the request on failover and other network failure conditions.
+ */
+@Inherited
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.METHOD)
+@InterfaceStability.Evolving
+public @interface AtMostOnce {
+
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RefreshUserMappingsProtocol.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RefreshUserMappingsProtocol.java
index b72e3ed6df7..005b2948ea2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RefreshUserMappingsProtocol.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RefreshUserMappingsProtocol.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.security.KerberosInfo;
 
 /**
@@ -43,12 +44,13 @@ public interface RefreshUserMappingsProtocol {
    * Refresh user to group mappings.
    * @throws IOException
    */
+  @Idempotent
   public void refreshUserToGroupsMappings() throws IOException;
 
   /**
    * Refresh superuser proxy group list
    * @throws IOException
    */
-  public void refreshSuperUserGroupsConfiguration() 
-      throws IOException;
+  @Idempotent
+  public void refreshSuperUserGroupsConfiguration() throws IOException;
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/RefreshAuthorizationPolicyProtocol.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/RefreshAuthorizationPolicyProtocol.java
index 4407a7e8e32..0f0b25d8344 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/RefreshAuthorizationPolicyProtocol.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/RefreshAuthorizationPolicyProtocol.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.security.KerberosInfo;
 
 /**
@@ -42,5 +43,6 @@ public interface RefreshAuthorizationPolicyProtocol {
    * Refresh the service-level authorization policy in-effect.
    * @throws IOException
    */
+  @Idempotent
   void refreshServiceAcl() throws IOException;
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetUserMappingsProtocol.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetUserMappingsProtocol.java
index c0c107933f5..3e80ac030d3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetUserMappingsProtocol.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetUserMappingsProtocol.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.retry.Idempotent;
 
 /**
  * Protocol implemented by the Name Node and Job Tracker which maps users to
@@ -41,5 +42,6 @@ public interface GetUserMappingsProtocol {
    * @return The set of groups the user belongs to.
    * @throws IOException
    */
+  @Idempotent
   public String[] getGroupsForUser(String user) throws IOException;
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 48706aa0c3c..1e5f9b81fa9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -105,6 +105,9 @@ Release 2.1.0-beta - 2013-07-02
 
     HDFS-4373. Add HTTP API for querying NameNode startup progress. (cnauroth)
 
     HDFS-4374. Display NameNode startup progress in UI. (cnauroth)
+
+    HDFS-4974. Add Idempotent and AtMostOnce annotations to namenode
+    protocol methods. (suresh)
 
   IMPROVEMENTS
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index d77cde5819e..5789c3615eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.retry.AtMostOnce;
 import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.KerberosInfo;
@@ -139,7 +140,7 @@ public interface ClientProtocol {
    * <p>
    * Blocks have a maximum size.  Clients that intend to create
    * multi-block files must also use
-   * {@link #addBlock(String, String, ExtendedBlock, DatanodeInfo[])}
+   * {@link #addBlock}
    *
    * @param src path of the file being created.
    * @param masked masked permission.
@@ -170,7 +171,10 @@ public interface ClientProtocol {
    *
    * RuntimeExceptions:
    * @throws InvalidPathException Path <code>src</code> is invalid
+   * <p>
+   * Note that create with {@link CreateFlag#OVERWRITE} is idempotent.
    */
+  @AtMostOnce
   public HdfsFileStatus create(String src, FsPermission masked,
       String clientName, EnumSetWritable<CreateFlag> flag,
       boolean createParent, short replication, long blockSize)
@@ -204,6 +208,7 @@ public interface ClientProtocol {
    * RuntimeExceptions:
    * @throws UnsupportedOperationException if append is not supported
    */
+  @AtMostOnce
   public LocatedBlock append(String src, String clientName)
       throws AccessControlException, DSQuotaExceededException,
       FileNotFoundException, SafeModeException, UnresolvedLinkException,
@@ -409,6 +414,7 @@ public interface ClientProtocol {
    * @throws SnapshotAccessControlException if path is in RO snapshot
    * @throws IOException an I/O error occurred
    */
+  @AtMostOnce
   public boolean rename(String src, String dst)
       throws UnresolvedLinkException, SnapshotAccessControlException, IOException;
 
@@ -422,6 +428,7 @@ public interface ClientProtocol {
    * contains a symlink
    * @throws SnapshotAccessControlException if path is in RO snapshot
    */
+  @AtMostOnce
   public void concat(String trg, String[] srcs)
       throws IOException, UnresolvedLinkException, SnapshotAccessControlException;
 
@@ -460,6 +467,7 @@ public interface ClientProtocol {
    * @throws SnapshotAccessControlException if path is in RO snapshot
    * @throws IOException If an I/O error occurred
    */
+  @AtMostOnce
   public void rename2(String src, String dst, Options.Rename... options)
       throws AccessControlException, DSQuotaExceededException,
       FileAlreadyExistsException, FileNotFoundException,
@@ -484,6 +492,7 @@ public interface ClientProtocol {
    * @throws SnapshotAccessControlException if path is in RO snapshot
    * @throws IOException If an I/O error occurred
    */
+  @AtMostOnce
   public boolean delete(String src, boolean recursive)
       throws AccessControlException, FileNotFoundException, SafeModeException,
       UnresolvedLinkException, SnapshotAccessControlException, IOException;
@@ -704,6 +713,7 @@ public interface ClientProtocol {
    * @throws AccessControlException if the superuser privilege is violated.
    * @throws IOException if image creation failed.
    */
+  @AtMostOnce
   public void saveNamespace() throws AccessControlException,
       IOException;
 
@@ -725,6 +735,7 @@ public interface ClientProtocol {
    *
    * @throws AccessControlException if the superuser privilege is violated.
    */
+  @Idempotent
   public boolean restoreFailedStorage(String arg)
       throws AccessControlException, IOException;
 
@@ -732,6 +743,7 @@ public interface ClientProtocol {
    * Tells the namenode to reread the hosts and exclude files.
    * @throws IOException
    */
+  @Idempotent
   public void refreshNodes() throws IOException;
 
   /**
@@ -741,6 +753,7 @@ public interface ClientProtocol {
    *
    * @throws IOException
    */
+  @Idempotent
   public void finalizeUpgrade() throws IOException;
 
   /**
@@ -763,6 +776,7 @@ public interface ClientProtocol {
    *
    * @throws IOException
    */
+  @Idempotent
   public void metaSave(String filename) throws IOException;
 
   /**
@@ -918,6 +932,7 @@ public interface ClientProtocol {
    * @throws SnapshotAccessControlException if path is in RO snapshot
    * @throws IOException If an I/O error occurred
    */
+  @AtMostOnce
   public void createSymlink(String target, String link, FsPermission dirPerm,
       boolean createParent) throws AccessControlException,
       FileAlreadyExistsException, FileNotFoundException,
@@ -965,6 +980,7 @@ public interface ClientProtocol {
    * @param newNodes datanodes in the pipeline
    * @throws IOException if any error occurs
    */
+  @AtMostOnce
   public void updatePipeline(String clientName, ExtendedBlock oldBlock,
       ExtendedBlock newBlock, DatanodeID[] newNodes) throws IOException;
 
@@ -997,6 +1013,7 @@ public interface ClientProtocol {
    * @param token delegation token
    * @throws IOException
    */
+  @Idempotent
   public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
       throws IOException;
 
@@ -1005,6 +1022,7 @@ public interface ClientProtocol {
    * DataTransferProtocol to/from DataNodes.
    * @throws IOException
    */
+  @Idempotent
   public DataEncryptionKey getDataEncryptionKey() throws IOException;
 
   /**
@@ -1014,6 +1032,7 @@ public interface ClientProtocol {
    * @return the snapshot path.
    * @throws IOException
    */
+  @AtMostOnce
   public String createSnapshot(String snapshotRoot, String snapshotName)
       throws IOException;
 
@@ -1023,6 +1042,7 @@ public interface ClientProtocol {
    * @param snapshotName Name of the snapshot for the snapshottable directory
    * @throws IOException
    */
+  @AtMostOnce
   public void deleteSnapshot(String snapshotRoot, String snapshotName)
       throws IOException;
 
@@ -1033,6 +1053,7 @@ public interface ClientProtocol {
    * @param snapshotNewName new name of the snapshot
    * @throws IOException
    */
+  @AtMostOnce
   public void renameSnapshot(String snapshotRoot, String snapshotOldName,
       String snapshotNewName) throws IOException;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
index 275b7c9a5c5..988590b5af4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
@@ -25,6 +25,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.io.retry.AtMostOnce;
+import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.security.KerberosInfo;
 
 /**********************************************************************
@@ -81,6 +83,7 @@ public interface DatanodeProtocol {
    * @return the given {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration} with
    *         updated registration information
    */
+  @Idempotent
   public DatanodeRegistration registerDatanode(DatanodeRegistration registration
       ) throws IOException;
 
@@ -98,6 +101,7 @@ public interface DatanodeProtocol {
    * @param failedVolumes number of failed volumes
    * @throws IOException on error
    */
+  @Idempotent
   public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration,
                                        StorageReport[] reports,
                                        int xmitsInProgress,
@@ -120,6 +124,7 @@ public interface DatanodeProtocol {
    * @return - the next command for DN to process.
    * @throws IOException
    */
+  @Idempotent
   public DatanodeCommand blockReport(DatanodeRegistration registration,
       String poolId, StorageBlockReport[] reports) throws IOException;
 
@@ -133,6 +138,7 @@ public interface DatanodeProtocol {
    * writes a new Block here, or another DataNode copies a Block to
    * this DataNode, it will call blockReceived().
    */
+  @AtMostOnce
   public void blockReceivedAndDeleted(DatanodeRegistration registration,
                             String poolId,
                             StorageReceivedDeletedBlocks[] rcvdAndDeletedBlocks)
@@ -142,21 +148,25 @@ public interface DatanodeProtocol {
    * errorReport() tells the NameNode about something that has gone
    * awry.  Useful for debugging.
    */
+  @Idempotent
   public void errorReport(DatanodeRegistration registration,
                           int errorCode,
                           String msg) throws IOException;
 
+  @Idempotent
   public NamespaceInfo versionRequest() throws IOException;
 
   /**
    * same as {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#reportBadBlocks(LocatedBlock[])}
    * }
    */
+  @Idempotent
   public void reportBadBlocks(LocatedBlock[] blocks) throws IOException;
 
   /**
    * Commit block synchronization in lease recovery
    */
+  @AtMostOnce
   public void commitBlockSynchronization(ExtendedBlock block,
       long newgenerationstamp, long newlength,
       boolean closeFile, boolean deleteblock, DatanodeID[] newtargets,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
index d9bae652b76..29bbb3b32a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
@@ -25,6 +25,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
+import org.apache.hadoop.io.retry.AtMostOnce;
+import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.security.KerberosInfo;
 
 /*****************************************************************************
@@ -73,6 +75,7 @@ public interface NamenodeProtocol {
    * @throws IOException if size is less than or equal to 0 or
    *                     datanode does not exist
    */
+  @Idempotent
   public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
       throws IOException;
 
@@ -82,6 +85,7 @@ public interface NamenodeProtocol {
    * @return ExportedBlockKeys containing current block keys
    * @throws IOException
    */
+  @Idempotent
   public ExportedBlockKeys getBlockKeys() throws IOException;
 
   /**
@@ -90,11 +94,13 @@ public interface NamenodeProtocol {
    * case of a non-active node.
    * @throws IOException
    */
+  @Idempotent
   public long getTransactionID() throws IOException;
 
   /**
    * Get the transaction ID of the most recent checkpoint.
    */
+  @Idempotent
   public long getMostRecentCheckpointTxId() throws IOException;
 
   /**
@@ -103,6 +109,7 @@ public interface NamenodeProtocol {
    * @throws IOException
    * @return a unique token to identify this transaction.
    */
+  @Idempotent
   public CheckpointSignature rollEditLog() throws IOException;
 
   /**
@@ -112,6 +119,7 @@ public interface NamenodeProtocol {
    * of the name-node
    * @throws IOException
    */
+  @Idempotent
   public NamespaceInfo versionRequest() throws IOException;
 
   /**
@@ -124,6 +132,7 @@ public interface NamenodeProtocol {
    * @param msg free text description of the error
    * @throws IOException
    */
+  @Idempotent
   public void errorReport(NamenodeRegistration registration,
                           int errorCode, String msg) throws IOException;
 
@@ -134,6 +143,7 @@ public interface NamenodeProtocol {
    * @return  {@link NamenodeRegistration} of the node,
    *          which this node has just registered with.
    */
+  @Idempotent
   public NamenodeRegistration registerSubordinateNamenode(
       NamenodeRegistration registration) throws IOException;
 
@@ -151,6 +161,7 @@ public interface NamenodeProtocol {
    * @return {@link CheckpointCommand} if checkpoint is allowed.
    * @throws IOException
    */
+  @AtMostOnce
   public NamenodeCommand startCheckpoint(NamenodeRegistration registration)
       throws IOException;
 
@@ -162,6 +173,7 @@ public interface NamenodeProtocol {
    * @param sig {@code CheckpointSignature} which identifies the checkpoint.
    * @throws IOException
    */
+  @AtMostOnce
   public void endCheckpoint(NamenodeRegistration registration,
                             CheckpointSignature sig) throws IOException;
 
@@ -171,6 +183,7 @@ public interface NamenodeProtocol {
    * available to be fetched from the NameNode.
    * @param sinceTxId return only logs that contain transactions >= sinceTxId
    */
+  @Idempotent
   public RemoteEditLogManifest getEditLogManifest(long sinceTxId)
       throws IOException;
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestAnnotations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestAnnotations.java
new file mode 100644
index 00000000000..c461e2c8cfb
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestAnnotations.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import java.lang.reflect.Method;
+
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.io.retry.AtMostOnce;
+import org.apache.hadoop.io.retry.Idempotent;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Tests to make sure all the protocol class public methods have
+ * either {@link Idempotent} or {@link AtMostOnce} annotations.
+ */ +public class TestAnnotations { + @Test + public void checkAnnotations() { + Method[] methods = NamenodeProtocols.class.getMethods(); + for (Method m : methods) { + Assert.assertTrue( + "Idempotent or AtMostOnce annotation is not present " + m, + m.isAnnotationPresent(Idempotent.class) + || m.isAnnotationPresent(AtMostOnce.class)); + } + } +}