MAPREDUCE-3368. Fixed test compilation. Contributed by Hitesh Shah.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1199627 13f79535-47bb-0310-9956-ffa450edef68
commit 3a4f39cd07
parent 77a8552ad3
@@ -42,6 +42,8 @@ Release 0.23.1 - Unreleased
     MAPREDUCE-3345. Fixed a race condition in ResourceManager that was causing
     TestContainerManagerSecurity to fail sometimes. (Hitesh Shah via vinodkv)

+    MAPREDUCE-3368. Fixed test compilation. (Hitesh Shah via vinodkv)
+
 Release 0.23.0 - 2011-11-01

   INCOMPATIBLE CHANGES
@@ -21,7 +21,6 @@ import java.net.InetAddress;
 import java.net.InetSocketAddress;

 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.ProtocolInfo;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.TestRPC.TestImpl;
@@ -124,7 +123,6 @@ public class TestAuditLogger extends TestCase {
    * A special extension of {@link TestImpl} RPC server with
    * {@link TestImpl#ping()} testing the audit logs.
    */
-  @ProtocolInfo(protocolName = "org.apache.hadoop.ipc.TestRPC$TestProtocol")
   private class MyTestRPCServer extends TestImpl {
     @Override
     public void ping() {
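The two TestAuditLogger hunks go together: the patch drops the org.apache.hadoop.ipc.ProtocolInfo import along with the @ProtocolInfo annotation on the test server, which suggests the annotation does not resolve on branch-0.23 and was the compile failure here. A minimal sketch of the class as it reads after the patch (a reconstruction from the context lines above, not the verbatim file; TestImpl comes from org.apache.hadoop.ipc.TestRPC):

    // After the patch the server is a plain TestImpl subclass with no
    // @ProtocolInfo annotation; ping() is overridden so the RPC call
    // shows up in the audit log under test.
    private class MyTestRPCServer extends TestImpl {
      @Override
      public void ping() {
      }
    }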
@@ -33,7 +33,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -61,7 +60,6 @@ public class TestSubmitJob {
       new Path(System.getProperty("test.build.data","/tmp"),
                "job-submission-testing");

-
   /**
    * Test to verify that jobs with invalid memory requirements are killed at the
    * JT.
@@ -109,8 +107,9 @@ public class TestSubmitJob {
       runJobAndVerifyFailure(jobConf, 1 * 1024L, 5 * 1024L,
           "Exceeds the cluster's max-memory-limit.");
     } finally {
-      if (mrCluster != null)
+      if (mrCluster != null) {
         mrCluster.shutdown();
+      }
     }
   }

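The finally-block hunk is purely stylistic: the null-guarded shutdown gains braces. The pattern itself is the standard one for cluster-based tests; a minimal self-contained sketch, with Cluster and startCluster() as hypothetical stand-ins for the MiniMRCluster setup:

    // Hypothetical sketch of the guard the patch formalizes: the cluster
    // reference may still be null if startup threw, so the shutdown in
    // the finally block must be guarded.
    Cluster cluster = null;
    try {
      cluster = startCluster();
      // ... run the job and assertions ...
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }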
@@ -148,16 +147,16 @@ public class TestSubmitJob {
         conf, NetUtils.getSocketFactory(conf, ClientProtocol.class));
   }

-  static ClientNamenodeWireProtocol getDFSClient(
+  static org.apache.hadoop.hdfs.protocol.ClientProtocol getDFSClient(
       Configuration conf, UserGroupInformation ugi)
       throws IOException {
-    return (ClientNamenodeWireProtocol)
-      RPC.getProxy(ClientNamenodeWireProtocol.class,
-          ClientNamenodeWireProtocol.versionID,
+    return (org.apache.hadoop.hdfs.protocol.ClientProtocol)
+      RPC.getProxy(org.apache.hadoop.hdfs.protocol.ClientProtocol.class,
+          org.apache.hadoop.hdfs.protocol.ClientProtocol.versionID,
           NameNode.getAddress(conf), ugi,
           conf,
           NetUtils.getSocketFactory(conf,
-              ClientNamenodeWireProtocol.class));
+              org.apache.hadoop.hdfs.protocol.ClientProtocol.class));
   }

   /**
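Reassembling the + lines of the hunk above gives the patched helper in one piece: every reference to ClientNamenodeWireProtocol is swapped back to org.apache.hadoop.hdfs.protocol.ClientProtocol (a reconstruction from the diff, assuming the imports already present in TestSubmitJob):

    static org.apache.hadoop.hdfs.protocol.ClientProtocol getDFSClient(
        Configuration conf, UserGroupInformation ugi)
        throws IOException {
      // Build an RPC proxy to the NameNode as the given user; every
      // protocol reference now points at hdfs.protocol.ClientProtocol.
      return (org.apache.hadoop.hdfs.protocol.ClientProtocol)
        RPC.getProxy(org.apache.hadoop.hdfs.protocol.ClientProtocol.class,
            org.apache.hadoop.hdfs.protocol.ClientProtocol.versionID,
            NameNode.getAddress(conf), ugi,
            conf,
            NetUtils.getSocketFactory(conf,
                org.apache.hadoop.hdfs.protocol.ClientProtocol.class));
    }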
@@ -226,7 +225,7 @@ public class TestSubmitJob {
       UserGroupInformation user2 =
           TestMiniMRWithDFSWithDistinctUsers.createUGI("user2", false);
       JobConf conf_other = mr.createJobConf();
-      ClientNamenodeWireProtocol client =
+      org.apache.hadoop.hdfs.protocol.ClientProtocol client =
           getDFSClient(conf_other, user2);

       // try accessing mapred.system.dir/jobid/*
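The last hunk simply retypes the client variable to match the new getDFSClient signature; the test then probes the job's staging area as a second user. A hedged sketch of the follow-on check (the path and the getFileInfo call are assumptions based on the comment in the diff, not lines shown above):

    org.apache.hadoop.hdfs.protocol.ClientProtocol client =
        getDFSClient(conf_other, user2);
    // Hypothetical probe: reading the job's files under mapred.system.dir
    // as user2 should trip the NameNode's permission checks.
    client.getFileInfo("/mapred/system/<jobid>/job.xml");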