HDFS-2676. Remove Avro RPC. Contributed by Suresh Srinivas.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1214102 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 2011-12-14 08:20:23 +00:00
parent b9238e3bad
commit 60dd76c6ab
14 changed files with 5 additions and 107 deletions

View File

@@ -1,6 +1,9 @@
Hadoop HDFS Change Log
Trunk (unreleased changes)
INCOMPATIBLE CHANGES
HDFS-2676. Remove Avro RPC. (suresh)
NEW FEATURES
HDFS-395. DFS Scalability: Incremental block reports. (Tomasz Nykiel
via hairong)

View File

@@ -20,8 +20,6 @@ package org.apache.hadoop.hdfs.protocol;
import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.avro.reflect.Nullable;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ContentSummary;
@@ -116,7 +114,6 @@ public interface ClientProtocol extends VersionedProtocol {
* @throws UnresolvedLinkException If <code>src</code> contains a symlink
* @throws IOException If an I/O error occurred
*/
@Nullable
public LocatedBlocks getBlockLocations(String src,
long offset,
long length)
@@ -311,7 +308,7 @@ public interface ClientProtocol extends VersionedProtocol {
* @throws IOException If an I/O error occurred
*/
public LocatedBlock addBlock(String src, String clientName,
@Nullable ExtendedBlock previous, @Nullable DatanodeInfo[] excludeNodes)
ExtendedBlock previous, DatanodeInfo[] excludeNodes)
throws AccessControlException, FileNotFoundException,
NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
IOException;
@@ -690,7 +687,6 @@ public interface ClientProtocol extends VersionedProtocol {
* @return upgrade status information or null if no upgrades are in progress
* @throws IOException
*/
@Nullable
public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
throws IOException;
@@ -736,7 +732,6 @@ public interface ClientProtocol extends VersionedProtocol {
* @throws UnresolvedLinkException if the path contains a symlink.
* @throws IOException If an I/O error occurred
*/
@Nullable
public HdfsFileStatus getFileInfo(String src) throws AccessControlException,
FileNotFoundException, UnresolvedLinkException, IOException;

View File

@@ -36,8 +36,6 @@ import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.util.StringUtils;
import org.apache.avro.reflect.Nullable;
/**
* DatanodeInfo represents the status of a DataNode.
* This object is used for communication in the
@@ -57,7 +55,6 @@ public class DatanodeInfo extends DatanodeID implements Node {
/** HostName as supplied by the datanode during registration as its
* name. Namenode uses datanode IP address as the name.
*/
@Nullable
protected String hostName = null;
// administrative states of a datanode
@@ -84,10 +81,8 @@ public class DatanodeInfo extends DatanodeID implements Node {
}
}
@Nullable
protected AdminStates adminState;
public DatanodeInfo() {
super();
adminState = null;

View File

@@ -31,8 +31,6 @@ import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
import org.apache.avro.reflect.Nullable;
/** Interface that represents the over the wire information for a file.
*/
@InterfaceAudience.Private
@@ -47,7 +45,6 @@ public class HdfsFileStatus implements Writable {
}
private byte[] path; // local name of the inode that's encoded in java UTF8
@Nullable
private byte[] symlink; // symlink target encoded in java UTF8 or null
private long length;
private boolean isdir;

View File

@@ -31,8 +31,6 @@ import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
import org.apache.avro.reflect.Nullable;
/**
* Collection of blocks with their locations and the file length.
*/
@@ -42,7 +40,6 @@ public class LocatedBlocks implements Writable {
private long fileLength;
private List<LocatedBlock> blocks; // array of blocks with prioritized locations
private boolean underConstruction;
@Nullable
private LocatedBlock lastLocatedBlock = null;
private boolean isLastBlockComplete = false;

View File

@@ -20,8 +20,6 @@ package org.apache.hadoop.hdfs.protocolR23Compatible;
import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.avro.reflect.Nullable;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CreateFlag;
@@ -97,7 +95,6 @@ public interface ClientNamenodeWireProtocol extends VersionedProtocol {
* The specification of this method matches that of
* {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#getBlockLocations}
*/
@Nullable
public LocatedBlocksWritable getBlockLocations(String src,
long offset,
long length)
@@ -175,7 +172,7 @@ public interface ClientNamenodeWireProtocol extends VersionedProtocol {
* org.apache.hadoop.hdfs.protocol.DatanodeInfo[])}
*/
public LocatedBlockWritable addBlock(String src, String clientName,
@Nullable ExtendedBlockWritable previous, @Nullable DatanodeInfoWritable[] excludeNodes)
ExtendedBlockWritable previous, DatanodeInfoWritable[] excludeNodes)
throws AccessControlException, FileNotFoundException,
NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
IOException;
@@ -344,7 +341,6 @@ public interface ClientNamenodeWireProtocol extends VersionedProtocol {
* The specification of this method matches that of
* {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#distributedUpgradeProgress}
*/
@Nullable
public UpgradeStatusReportWritable distributedUpgradeProgress(
UpgradeAction action)
throws IOException;
@@ -373,7 +369,6 @@ public interface ClientNamenodeWireProtocol extends VersionedProtocol {
* The specification of this method matches that of
* {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#getFileInfo(String)}
*/
@Nullable
public HdfsFileStatusWritable getFileInfo(String src)
throws AccessControlException,
FileNotFoundException, UnresolvedLinkException, IOException;

View File

@@ -34,8 +34,6 @@ import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.avro.reflect.Nullable;
/**
* DatanodeInfo represents the status of a DataNode.
* This object is used for communication in the
@@ -55,7 +53,6 @@ public class DatanodeInfoWritable extends DatanodeIDWritable {
/** HostName as supplied by the datanode during registration as its
* name. Namenode uses datanode IP address as the name.
*/
@Nullable
protected String hostName = null;
// administrative states of a datanode
@@ -82,7 +79,6 @@ public class DatanodeInfoWritable extends DatanodeIDWritable {
}
}
@Nullable
protected AdminStates adminState;
static public DatanodeInfo convertDatanodeInfo(DatanodeInfoWritable di) {

View File

@@ -30,8 +30,6 @@ import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
import org.apache.avro.reflect.Nullable;
/** Interface that represents the over the wire information for a file.
*/
@InterfaceAudience.Private
@@ -46,7 +44,6 @@ public class HdfsFileStatusWritable implements Writable {
}
private byte[] path; // local name of the inode that's encoded in java UTF8
@Nullable
private byte[] symlink; // symlink target encoded in java UTF8 or null
private long length;
private boolean isdir;

View File

@@ -29,8 +29,6 @@ import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
import org.apache.avro.reflect.Nullable;
/**
* Collection of blocks with their locations and the file length.
*/
@@ -40,7 +38,6 @@ public class LocatedBlocksWritable implements Writable {
private long fileLength;
private List<LocatedBlockWritable> blocks; // array of blocks with prioritized locations
private boolean underConstruction;
@Nullable
private LocatedBlockWritable lastLocatedBlock = null;
private boolean isLastBlockComplete = false;

View File

@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hdfs.server.protocol;
import org.apache.avro.reflect.Union;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -25,13 +24,6 @@ import org.apache.hadoop.classification.InterfaceStability;
* Base class for data-node command.
* Issued by the name-node to notify data-nodes what should be done.
*/
// Declare subclasses for Avro's denormalized representation
@Union({Void.class,
RegisterCommand.class, FinalizeCommand.class,
BlockCommand.class, UpgradeCommand.class,
BlockRecoveryCommand.class, KeyUpdateCommand.class})
@InterfaceAudience.Private
@InterfaceStability.Evolving
public abstract class DatanodeCommand extends ServerCommand {

View File

@@ -30,8 +30,6 @@ import org.apache.hadoop.hdfs.server.protocolR23Compatible.DatanodeWireProtocol;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.avro.reflect.Nullable;
/**********************************************************************
* Protocol that a DFS datanode uses to communicate with the NameNode.
* It's used to upload current load information and block reports.
@@ -107,7 +105,6 @@ public interface DatanodeProtocol extends VersionedProtocol {
* @param failedVolumes number of failed volumes
* @throws IOException on error
*/
@Nullable
public DatanodeCommand[] sendHeartbeat(DatanodeRegistration registration,
long capacity,
long dfsUsed, long remaining,

View File

@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.protocolR23Compatible;
import java.io.IOException;
import org.apache.avro.reflect.Nullable;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -98,7 +97,6 @@ public interface DatanodeWireProtocol extends VersionedProtocol {
* @param failedVolumes number of failed volumes
* @throws IOException on error
*/
@Nullable
public DatanodeCommandWritable[] sendHeartbeat(
DatanodeRegistrationWritable registration, long capacity, long dfsUsed,
long remaining, long blockPoolUsed, int xmitsInProgress,

View File

@@ -507,31 +507,6 @@ public class MiniDFSCluster {
this.federation = federation;
this.waitSafeMode = waitSafeMode;
// use alternate RPC engine if spec'd
String rpcEngineName = System.getProperty("hdfs.rpc.engine");
if (rpcEngineName != null && !"".equals(rpcEngineName)) {
LOG.info("HDFS using RPCEngine: " + rpcEngineName);
try {
Class<?> rpcEngine = conf.getClassByName(rpcEngineName);
setRpcEngine(conf, NamenodeProtocols.class, rpcEngine);
setRpcEngine(conf, ClientNamenodeWireProtocol.class, rpcEngine);
setRpcEngine(conf, ClientDatanodeProtocolPB.class, rpcEngine);
setRpcEngine(conf, NamenodeProtocolPB.class, rpcEngine);
setRpcEngine(conf, ClientProtocol.class, rpcEngine);
setRpcEngine(conf, DatanodeProtocolPB.class, rpcEngine);
setRpcEngine(conf, RefreshAuthorizationPolicyProtocol.class, rpcEngine);
setRpcEngine(conf, RefreshUserMappingsProtocol.class, rpcEngine);
setRpcEngine(conf, GetUserMappingsProtocol.class, rpcEngine);
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
// disable service authorization, as it does not work with tunnelled RPC
conf.setBoolean(HADOOP_SECURITY_AUTHORIZATION,
false);
}
int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);

View File

@@ -1,36 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import org.junit.Test;
/** Test for simple signs of life using Avro RPC. Not an exhaustive test
* yet, just enough to catch fundamental problems using Avro reflection to
* infer namenode RPC protocols. */
public class TestDfsOverAvroRpc extends TestLocalDFS {
@Test(timeout=20000)
public void testWorkingDirectory() throws IOException {
System.setProperty("hdfs.rpc.engine",
"org.apache.hadoop.ipc.AvroRpcEngine");
super.testWorkingDirectory();
}
}