Merge r1233741 through r1234387 from 0.23.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23-PB@1234406 13f79535-47bb-0310-9956-ffa450edef68
Author: Tsz-wo Sze
Date:   2012-01-21 19:50:30 +00:00
Commit: 6e8fe7af37

365 changed files with 12260 additions and 2299 deletions


@@ -201,6 +201,12 @@ Release 0.23.1 - Unreleased
     HADOOP-7971. Adding back job/pipes/queue commands to bin/hadoop for
     backward compatibility. (Prashath Sharma via acmurthy)

+    HADOOP-7982. UserGroupInformation fails to login if thread's context
+    classloader can't load HadoopLoginModule. (todd)
+
+    HADOOP-7986. Adding config for MapReduce History Server protocol in
+    hadoop-policy.xml for service level authorization. (Mahadev Konar via vinodkv)
+
 Release 0.23.0 - 2011-11-01

   INCOMPATIBLE CHANGES


@@ -416,9 +416,19 @@ public class UserGroupInformation {
   private static LoginContext
   newLoginContext(String appName, Subject subject) throws LoginException {
-    return new LoginContext(appName, subject, null, new HadoopConfiguration());
+    // Temporarily switch the thread's ContextClassLoader to match this
+    // class's classloader, so that we can properly load HadoopLoginModule
+    // from the JAAS libraries.
+    Thread t = Thread.currentThread();
+    ClassLoader oldCCL = t.getContextClassLoader();
+    t.setContextClassLoader(HadoopLoginModule.class.getClassLoader());
+    try {
+      return new LoginContext(appName, subject, null, new HadoopConfiguration());
+    } finally {
+      t.setContextClassLoader(oldCCL);
+    }
   }

   private LoginContext getLogin() {
     return user.getLogin();
   }
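The HADOOP-7982 fix above uses a standard idiom: save the thread's context classloader, point it at a loader that can see the required classes, and restore it in a finally block so the swap never leaks. A minimal standalone sketch of the same idiom (hypothetical class and method names, not code from this commit):

    import javax.security.auth.login.LoginContext;
    import javax.security.auth.login.LoginException;

    public final class ContextClassLoaderSwitch {
      // Runs a JAAS login while the calling thread's context classloader is
      // temporarily set to a loader that can resolve the login module.
      static LoginContext loginWithLoader(String appName, ClassLoader moduleLoader)
          throws LoginException {
        Thread t = Thread.currentThread();
        ClassLoader oldCCL = t.getContextClassLoader();
        t.setContextClassLoader(moduleLoader);
        try {
          return new LoginContext(appName);
        } finally {
          // Always restore the original loader, even if the login throws.
          t.setContextClassLoader(oldCCL);
        }
      }
    }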


@@ -209,4 +209,14 @@
     A special value of "*" means all users are allowed.</description>
   </property>

+  <property>
+    <name>security.mrhs.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HSClientProtocol, used by job clients to
+    communciate with the MR History Server job status etc.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
 </configuration>


@@ -1,3 +1,19 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements. See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License. You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 <FindBugsFilter>
   <Match>
     <Class name="org.apache.hadoop.lib.service.instrumentation.InstrumentationService" />


@@ -199,6 +199,8 @@ Release 0.23.1 - UNRELEASED
     HDFS-2803. Add logging to LeaseRenewer for better lease expiration debugging.
     (Jimmy Xiang via todd)

+    HDFS-2817. Combine the two TestSafeMode test suites. (todd)
+
   OPTIMIZATIONS

     HDFS-2130. Switch default checksum to CRC32C. (todd)
@@ -266,6 +268,14 @@ Release 0.23.1 - UNRELEASED
     HDFS-2790. FSNamesystem.setTimes throws exception with wrong
     configuration name in the message. (Arpit Gupta via eli)

+    HDFS-2810. Leases not getting renewed properly by clients (todd)
+
+    HDFS-2751. Datanode may incorrectly drop OS cache behind reads
+    even for short reads. (todd)
+
+    HDFS-2816. Fix missing license header in httpfs findbugsExcludeFile.xml.
+    (hitesh via tucu)
+
 Release 0.23.0 - 2011-11-01

   INCOMPATIBLE CHANGES


@@ -373,11 +373,17 @@ public class DFSClient implements java.io.Closeable {
     return clientRunning;
   }

-  /** Renew leases */
-  void renewLease() throws IOException {
+  /**
+   * Renew leases.
+   * @return true if lease was renewed. May return false if this
+   * client has been closed or has no files open.
+   **/
+  boolean renewLease() throws IOException {
     if (clientRunning && !isFilesBeingWrittenEmpty()) {
       namenode.renewLease(clientName);
+      return true;
     }
+    return false;
   }

   /**


@@ -67,7 +67,7 @@ import org.apache.hadoop.util.StringUtils;
  * </p>
  */
 class LeaseRenewer {
-  private static final Log LOG = LogFactory.getLog(LeaseRenewer.class);
+  static final Log LOG = LogFactory.getLog(LeaseRenewer.class);

   static final long LEASE_RENEWER_GRACE_DEFAULT = 60*1000L;
   static final long LEASE_RENEWER_SLEEP_DEFAULT = 1000L;
@@ -407,7 +407,13 @@ class LeaseRenewer {
       final DFSClient c = copies.get(i);
       //skip if current client name is the same as the previous name.
       if (!c.getClientName().equals(previousName)) {
-        c.renewLease();
+        if (!c.renewLease()) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Did not renew lease for client " +
+                c);
+          }
+          continue;
+        }
         previousName = c.getClientName();
         if (LOG.isDebugEnabled()) {
           LOG.debug("Lease renewed for client " + previousName);


@@ -315,7 +315,7 @@ class BlockSender implements java.io.Closeable {
    * close opened files.
    */
   public void close() throws IOException {
-    if (blockInFd != null && shouldDropCacheBehindRead) {
+    if (blockInFd != null && shouldDropCacheBehindRead && isLongRead()) {
       // drop the last few MB of the file from cache
       try {
         NativeIO.posixFadviseIfPossible(
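The HDFS-2751 change above gates the drop-behind fadvise on isLongRead(), so short random reads no longer evict pages that may be re-read shortly. A rough sketch of that guard, with a hypothetical interface standing in for the NativeIO.posixFadviseIfPossible call (its exact signature is not shown in this hunk) and an assumed threshold:

    import java.io.FileDescriptor;

    class DropBehindReadSketch {
      /** Hypothetical stand-in for a posix_fadvise(POSIX_FADV_DONTNEED) wrapper. */
      interface FadviseDontNeed {
        void dropFromCache(FileDescriptor fd, long offset, long len);
      }

      // Assumed cutoff; the real definition of a "long read" lives in BlockSender.
      private static final long LONG_READ_THRESHOLD_BYTES = 256 * 1024;

      // Only advise the kernel to drop cached pages when the read was long enough
      // that the data is unlikely to be requested again soon (e.g. a block scan).
      static void maybeDropCacheBehindRead(FileDescriptor fd, long startOffset,
          long bytesRead, boolean dropCacheBehindRead, FadviseDontNeed fadvise) {
        boolean isLongRead = bytesRead >= LONG_READ_THRESHOLD_BYTES;
        if (fd != null && dropCacheBehindRead && isLongRead) {
          fadvise.dropFromCache(fd, startOffset, bytesRead);
        }
      }
    }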


@@ -17,11 +17,14 @@
  */
 package org.apache.hadoop.hdfs;

+import static org.junit.Assert.*;
+
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicInteger;

 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -29,6 +32,8 @@ import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;

+import com.google.common.base.Supplier;
+
 public class TestLeaseRenewer {
   private String FAKE_AUTHORITY="hdfs://nn1/";
   private UserGroupInformation FAKE_UGI_A =
@@ -46,19 +51,24 @@ public class TestLeaseRenewer {

   @Before
   public void setupMocksAndRenewer() throws IOException {
-    MOCK_DFSCLIENT = Mockito.mock(DFSClient.class);
-    Mockito.doReturn(true)
-        .when(MOCK_DFSCLIENT).isClientRunning();
-    Mockito.doReturn((int)FAST_GRACE_PERIOD)
-        .when(MOCK_DFSCLIENT).getHdfsTimeout();
-    Mockito.doReturn("myclient")
-        .when(MOCK_DFSCLIENT).getClientName();
+    MOCK_DFSCLIENT = createMockClient();

     renewer = LeaseRenewer.getInstance(
         FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT);
     renewer.setGraceSleepPeriod(FAST_GRACE_PERIOD);
   }

+  private DFSClient createMockClient() {
+    DFSClient mock = Mockito.mock(DFSClient.class);
+    Mockito.doReturn(true)
+        .when(mock).isClientRunning();
+    Mockito.doReturn((int)FAST_GRACE_PERIOD)
+        .when(mock).getHdfsTimeout();
+    Mockito.doReturn("myclient")
+        .when(mock).getClientName();
+    return mock;
+  }
+
   @Test
   public void testInstanceSharing() throws IOException {
     // Two lease renewers with the same UGI should return
@@ -93,11 +103,11 @@ public class TestLeaseRenewer {
   public void testRenewal() throws Exception {
     // Keep track of how many times the lease gets renewed
     final AtomicInteger leaseRenewalCount = new AtomicInteger();
-    Mockito.doAnswer(new Answer<Void>() {
+    Mockito.doAnswer(new Answer<Boolean>() {
       @Override
-      public Void answer(InvocationOnMock invocation) throws Throwable {
+      public Boolean answer(InvocationOnMock invocation) throws Throwable {
         leaseRenewalCount.incrementAndGet();
-        return null;
+        return true;
       }
     }).when(MOCK_DFSCLIENT).renewLease();
@@ -120,6 +130,57 @@ public class TestLeaseRenewer {
     renewer.closeFile(filePath, MOCK_DFSCLIENT);
   }

+  /**
+   * Regression test for HDFS-2810. In this bug, the LeaseRenewer has handles
+   * to several DFSClients with the same name, the first of which has no files
+   * open. Previously, this was causing the lease to not get renewed.
+   */
+  @Test
+  public void testManyDfsClientsWhereSomeNotOpen() throws Exception {
+    // First DFSClient has no files open so doesn't renew leases.
+    final DFSClient mockClient1 = createMockClient();
+    Mockito.doReturn(false).when(mockClient1).renewLease();
+    assertSame(renewer, LeaseRenewer.getInstance(
+        FAKE_AUTHORITY, FAKE_UGI_A, mockClient1));
+
+    // Set up a file so that we start renewing our lease.
+    DFSOutputStream mockStream1 = Mockito.mock(DFSOutputStream.class);
+    String filePath = "/foo";
+    renewer.put(filePath, mockStream1, mockClient1);
+
+    // Second DFSClient does renew lease
+    final DFSClient mockClient2 = createMockClient();
+    Mockito.doReturn(true).when(mockClient2).renewLease();
+    assertSame(renewer, LeaseRenewer.getInstance(
+        FAKE_AUTHORITY, FAKE_UGI_A, mockClient2));
+
+    // Set up a file so that we start renewing our lease.
+    DFSOutputStream mockStream2 = Mockito.mock(DFSOutputStream.class);
+    renewer.put(filePath, mockStream2, mockClient2);
+
+    // Wait for lease to get renewed
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        try {
+          Mockito.verify(mockClient1, Mockito.atLeastOnce()).renewLease();
+          Mockito.verify(mockClient2, Mockito.atLeastOnce()).renewLease();
+          return true;
+        } catch (AssertionError err) {
+          LeaseRenewer.LOG.warn("Not yet satisfied", err);
+          return false;
+        } catch (IOException e) {
+          // should not throw!
+          throw new RuntimeException(e);
+        }
+      }
+    }, 100, 10000);
+
+    renewer.closeFile(filePath, mockClient1);
+    renewer.closeFile(filePath, mockClient2);
+  }
+
   @Test
   public void testThreadName() throws Exception {
     DFSOutputStream mockStream = Mockito.mock(DFSOutputStream.class);
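The regression test above relies on GenericTestUtils.waitFor(supplier, 100, 10000) to poll until both mocked clients have had renewLease() verified. Assuming those two numeric arguments are a check interval and a timeout in milliseconds, an equivalent standalone polling helper could look like this (a sketch, not the Hadoop implementation):

    import java.util.concurrent.Callable;
    import java.util.concurrent.TimeoutException;

    final class WaitForSketch {
      // Re-evaluates the condition every checkEveryMillis until it returns true,
      // failing with a TimeoutException after waitForMillis.
      static void waitFor(Callable<Boolean> condition, long checkEveryMillis,
          long waitForMillis) throws Exception {
        long deadline = System.currentTimeMillis() + waitForMillis;
        while (true) {
          if (Boolean.TRUE.equals(condition.call())) {
            return;
          }
          if (System.currentTimeMillis() > deadline) {
            throw new TimeoutException("Condition not met within " + waitForMillis + " ms");
          }
          Thread.sleep(checkEveryMillis);
        }
      }
    }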


@@ -113,6 +113,21 @@ public class TestSafeMode {
       dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
   }

+  /**
+   * Test that, if there are no blocks in the filesystem,
+   * the NameNode doesn't enter the "safemode extension" period.
+   */
+  @Test(timeout=45000)
+  public void testNoExtensionIfNoBlocks() throws IOException {
+    cluster.getConfiguration(0).setInt(
+        DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 60000);
+    cluster.restartNameNode();
+    // Even though we have safemode extension set high, we should immediately
+    // exit safemode on startup because there are no blocks in the namespace.
+    String status = cluster.getNameNode().getNamesystem().getSafemode();
+    assertEquals("", status);
+  }
+
   public interface FSRun {
     public abstract void run(FileSystem fs) throws IOException;
   }
@@ -193,5 +208,37 @@ public class TestSafeMode {
     assertFalse("Could not leave SM",
       dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
   }

+  /**
+   * Verify that the NameNode stays in safemode when dfs.safemode.datanode.min
+   * is set to a number greater than the number of live datanodes.
+   */
+  @Test
+  public void testDatanodeThreshold() throws IOException {
+    cluster.shutdown();
+    Configuration conf = cluster.getConfiguration(0);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
+    cluster.restartNameNode();
+    fs = (DistributedFileSystem)cluster.getFileSystem();
+
+    String tipMsg = cluster.getNamesystem().getSafemode();
+    assertTrue("Safemode tip message looks right: " + tipMsg,
+        tipMsg.contains("The number of live datanodes 0 needs an additional " +
+                        "2 live datanodes to reach the minimum number 1. " +
+                        "Safe mode will be turned off automatically."));
+
+    // Start a datanode
+    cluster.startDataNodes(conf, 1, true, null, null);
+
+    // Wait long enough for safemode check to refire
+    try {
+      Thread.sleep(1000);
+    } catch (InterruptedException ignored) {}
+
+    // We now should be out of safe mode.
+    assertEquals("", cluster.getNamesystem().getSafemode());
+  }
 }


@@ -1,82 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* Tests to verify safe mode correctness.
*/
public class TestSafeMode {
/**
* Verify that the NameNode stays in safemode when dfs.safemode.datanode.min
* is set to a number greater than the number of live datanodes.
*/
@Test
public void testDatanodeThreshold() throws IOException {
MiniDFSCluster cluster = null;
DistributedFileSystem fs = null;
try {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
// bring up a cluster with no datanodes
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
cluster.waitActive();
fs = (DistributedFileSystem)cluster.getFileSystem();
assertTrue("No datanode started, but we require one - safemode expected",
fs.setSafeMode(SafeModeAction.SAFEMODE_GET));
String tipMsg = cluster.getNamesystem().getSafeModeTip();
assertTrue("Safemode tip message looks right",
tipMsg.contains("The number of live datanodes 0 needs an additional " +
"2 live datanodes to reach the minimum number 1. " +
"Safe mode will be turned off automatically."));
// Start a datanode
cluster.startDataNodes(conf, 1, true, null, null);
// Wait long enough for safemode check to refire
try {
Thread.sleep(1000);
} catch (InterruptedException ignored) {}
// We now should be out of safe mode.
assertFalse(
"Out of safe mode after starting datanode.",
fs.setSafeMode(SafeModeAction.SAFEMODE_GET));
} finally {
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
}
}
}


@@ -30,8 +30,13 @@ Release 0.23.1 - Unreleased
     MAPREDUCE-3251. Network ACLs can prevent some clients to talk to MR ApplicationMaster.
     (Anupam Seth via mahadev)

+    MAPREDUCE-778. Rumen Anonymizer. (Amar Kamat and Chris Douglas via amarrk)
+
   IMPROVEMENTS

+    MAPREDUCE-3597. [Rumen] Rumen should provide APIs to access all the
+    job-history related information.
+
     MAPREDUCE-3375. [Gridmix] Memory Emulation system tests.
     (Vinay Thota via amarrk)

     MAPREDUCE-2733. [Gridmix] Gridmix3 cpu emulation system tests.
@@ -476,6 +481,24 @@ Release 0.23.1 - Unreleased
     MAPREDUCE-3684. LocalDistributedCacheManager does not shut down its thread
     pool (tomwhite)

+    MAPREDUCE-3582. Move successfully passing MR1 tests to MR2 maven tree.
+    (ahmed via tucu)
+
+    MAPREDUCE-3698. Client cannot talk to the history server in secure mode.
+    (mahadev)
+
+    MAPREDUCE-3689. RM web UI doesn't handle newline in job name.
+    (Thomas Graves via mahadev)
+
+    MAPREDUCE-3701. Delete HadoopYarnRPC from 0.23 branch.
+    (mahadev)
+
+    MAPREDUCE-3549. write api documentation for web service apis for RM, NM,
+    mapreduce app master, and job history server (Thomas Graves via mahadev)
+
+    MAPREDUCE-3705. ant build fails on 0.23 branch. (Thomas Graves via
+    mahadev)
+
 Release 0.23.0 - 2011-11-01

   INCOMPATIBLE CHANGES


@@ -575,8 +575,6 @@
     <copy file="${test.src.dir}/mapred/org/apache/hadoop/mapred/test.tar" todir="${test.cache.data}"/>
     <copy file="${test.src.dir}/mapred/org/apache/hadoop/mapred/test.tgz" todir="${test.cache.data}"/>
     <copy file="${test.src.dir}/mapred/org/apache/hadoop/mapred/test.tar.gz" todir="${test.cache.data}"/>
-    <copy file="${test.src.dir}/mapred/org/apache/hadoop/cli/testMRConf.xml" todir="${test.cache.data}"/>
-    <copy file="${test.src.dir}/mapred/org/apache/hadoop/cli/data60bytes" todir="${test.cache.data}"/>
     <copy file="${test.src.dir}/mapred/org/apache/hadoop/mapred/concat.bz2" todir="${test.concat.data}"/>
     <copy file="${test.src.dir}/mapred/org/apache/hadoop/mapred/concat.gz" todir="${test.concat.data}"/>
     <copy file="${test.src.dir}/mapred/org/apache/hadoop/mapred/testCompressThenConcat.txt.bz2" todir="${test.concat.data}"/>


@@ -0,0 +1,45 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.security.authorize;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;
import org.apache.hadoop.yarn.proto.HSClientProtocol;
/**
* {@link PolicyProvider} for YARN MapReduce protocols.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class ClientHSPolicyProvider extends PolicyProvider {
private static final Service[] mrHSServices =
new Service[] {
new Service(
JHAdminConfig.MR_HS_SECURITY_SERVICE_AUTHORIZATION,
HSClientProtocol.HSClientProtocolService.BlockingInterface.class)
};
@Override
public Service[] getServices() {
return mrHSServices;
}
}


@@ -30,7 +30,7 @@ import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;

-@XmlRootElement(name = "JobTaskAttemptCounters")
+@XmlRootElement(name = "jobTaskAttemptCounters")
 @XmlAccessorType(XmlAccessType.FIELD)
 public class JobTaskAttemptCounterInfo {


@@ -629,7 +629,7 @@ public class TestAMWebServicesAttempts extends JerseyTest {
       assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
       JSONObject json = response.getEntity(JSONObject.class);
       assertEquals("incorrect number of elements", 1, json.length());
-      JSONObject info = json.getJSONObject("JobTaskAttemptCounters");
+      JSONObject info = json.getJSONObject("jobTaskAttemptCounters");
       verifyAMJobTaskAttemptCounters(info, att);
     }
   }
@@ -661,7 +661,7 @@ public class TestAMWebServicesAttempts extends JerseyTest {
     InputSource is = new InputSource();
     is.setCharacterStream(new StringReader(xml));
     Document dom = db.parse(is);
-    NodeList nodes = dom.getElementsByTagName("JobTaskAttemptCounters");
+    NodeList nodes = dom.getElementsByTagName("jobTaskAttemptCounters");

     verifyAMTaskCountersXML(nodes, att);
   }


@@ -22,13 +22,20 @@ import java.io.IOException;
 import java.net.InetSocketAddress;

 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
+import org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine;
+import org.apache.hadoop.yarn.proto.HSClientProtocol.HSClientProtocolService;

 public class HSClientProtocolPBClientImpl extends MRClientProtocolPBClientImpl
     implements HSClientProtocol {

   public HSClientProtocolPBClientImpl(long clientVersion,
       InetSocketAddress addr, Configuration conf) throws IOException {
-    super(clientVersion, addr, conf);
+    super();
+    RPC.setProtocolEngine(conf, HSClientProtocolService.BlockingInterface.class,
+        ProtoOverHadoopRpcEngine.class);
+    proxy = (HSClientProtocolService.BlockingInterface)RPC.getProxy(
+        HSClientProtocolService.BlockingInterface.class, clientVersion, addr, conf);
   }
 }


@@ -93,7 +93,9 @@ import com.google.protobuf.ServiceException;
 public class MRClientProtocolPBClientImpl implements MRClientProtocol {

-  private MRClientProtocolService.BlockingInterface proxy;
+  protected MRClientProtocolService.BlockingInterface proxy;
+
+  public MRClientProtocolPBClientImpl() {};

   public MRClientProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException {
     RPC.setProtocolEngine(conf, MRClientProtocolService.BlockingInterface.class, ProtoOverHadoopRpcEngine.class);
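Taken together, the two hunks above carry the client side of MAPREDUCE-3698: MRClientProtocolPBClientImpl exposes its proxy field as protected and gains a no-argument constructor, so HSClientProtocolPBClientImpl can call super() and then install a history-server-specific proxy instead of the MR ApplicationMaster one. In isolation the shape of that pattern is roughly this (hypothetical types, not this commit's code):

    class BaseProtocolClient {
      protected Object proxy;            // concrete proxy type omitted in this sketch

      protected BaseProtocolClient() {}  // lets a subclass defer proxy construction

      BaseProtocolClient(String address) {
        this.proxy = "default-proxy:" + address;    // placeholder for RPC.getProxy(...)
      }
    }

    class HistoryProtocolClient extends BaseProtocolClient {
      HistoryProtocolClient(String address) {
        super();                                    // skip the default proxy
        this.proxy = "history-proxy:" + address;    // install the protocol-specific proxy
      }
    }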


@@ -111,4 +111,9 @@ public class JHAdminConfig {
   public static final int DEFAULT_MR_HISTORY_WEBAPP_PORT = 19888;
   public static final String DEFAULT_MR_HISTORY_WEBAPP_ADDRESS =
     "0.0.0.0:" + DEFAULT_MR_HISTORY_WEBAPP_PORT;
+  /*
+   * HS Service Authorization
+   */
+  public static final String MR_HS_SECURITY_SERVICE_AUTHORIZATION =
+      "security.mrhs.client.protocol.acl";
 }
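The new constant is the same key that the hadoop-policy.xml hunk earlier in this commit defines (security.mrhs.client.protocol.acl) and that ClientHSPolicyProvider binds to the HS client protocol. As a quick sanity check of what value the authorization layer would see for that key (a sketch only; real enforcement goes through refreshServiceAcl and the service authorization framework, not a direct lookup):

    import org.apache.hadoop.conf.Configuration;

    class HsAclLookupSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.addResource("hadoop-policy.xml");  // assumes the policy file is on the classpath
        // "*" (the shipped default) means every user may reach the MR History Server.
        String acl = conf.get("security.mrhs.client.protocol.acl", "*");
        System.out.println("HSClientProtocol ACL = " + acl);
      }
    }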


@@ -20,6 +20,8 @@ package org.apache.hadoop.mapreduce.v2.security.client;

 import java.lang.annotation.Annotation;

+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.security.KerberosInfo;
@@ -30,7 +32,7 @@ import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.hadoop.yarn.proto.HSClientProtocol;

 public class ClientHSSecurityInfo extends SecurityInfo {

   @Override
   public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
     if (!protocol


@@ -66,7 +66,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
-import org.apache.hadoop.mapreduce.v2.app.security.authorize.MRAMPolicyProvider;
+import org.apache.hadoop.mapreduce.v2.app.security.authorize.ClientHSPolicyProvider;
 import org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebApp;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.net.NetUtils;
@@ -136,9 +136,9 @@ public class HistoryClientService extends AbstractService {
     if (conf.getBoolean(
         CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
         false)) {
-      server.refreshServiceAcl(conf, new MRAMPolicyProvider());
+      server.refreshServiceAcl(conf, new ClientHSPolicyProvider());
     }

     server.start();
     this.bindAddress =
         NetUtils.createSocketAddr(hostNameResolved.getHostAddress()


@@ -35,6 +35,7 @@ import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
+import java.util.regex.Pattern;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -49,6 +50,7 @@ import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.mapred.JobACLsManager;
+import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.jobhistory.JobSummary;
@@ -86,6 +88,9 @@ public class JobHistory extends AbstractService implements HistoryContext {
   private static final Log LOG = LogFactory.getLog(JobHistory.class);
   private static final Log SUMMARY_LOG = LogFactory.getLog(JobSummary.class);

+  public static final Pattern CONF_FILENAME_REGEX =
+      Pattern.compile("(" + JobID.JOBID_REGEX + ")_conf.xml(?:\\.[0-9]+\\.old)?");
+  public static final String OLD_SUFFIX = ".old";
+
   private static String DONE_BEFORE_SERIAL_TAIL =
       JobHistoryUtils.doneSubdirsBeforeSerialTail();
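CONF_FILENAME_REGEX above matches job configuration files in the history directories, including rolled-over copies carrying a numeric ".N.old" suffix. A standalone sketch of that matching, with the job-ID portion replaced by a guess at what JobID.JOBID_REGEX expands to (the real constant lives in org.apache.hadoop.mapreduce.JobID):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    class ConfFilenameMatchSketch {
      // Simplified job-ID pattern; the real one is JobID.JOBID_REGEX.
      private static final String JOBID_REGEX = "job_[0-9]+_[0-9]+";
      private static final Pattern CONF_FILENAME_REGEX =
          Pattern.compile("(" + JOBID_REGEX + ")_conf.xml(?:\\.[0-9]+\\.old)?");

      public static void main(String[] args) {
        String[] names = {
            "job_201201211950_0042_conf.xml",        // current conf file
            "job_201201211950_0042_conf.xml.3.old",  // rolled-over copy
            "job_201201211950_0042.summary"          // not a conf file
        };
        for (String name : names) {
          Matcher m = CONF_FILENAME_REGEX.matcher(name);
          System.out.println(name + " -> " + (m.matches() ? "job " + m.group(1) : "no match"));
        }
      }
    }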


@@ -642,7 +642,7 @@ public class TestHsWebServicesAttempts extends JerseyTest {
       assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
       JSONObject json = response.getEntity(JSONObject.class);
       assertEquals("incorrect number of elements", 1, json.length());
-      JSONObject info = json.getJSONObject("JobTaskAttemptCounters");
+      JSONObject info = json.getJSONObject("jobTaskAttemptCounters");
       verifyHsJobTaskAttemptCounters(info, att);
     }
   }
@@ -674,7 +674,7 @@ public class TestHsWebServicesAttempts extends JerseyTest {
     InputSource is = new InputSource();
     is.setCharacterStream(new StringReader(xml));
     Document dom = db.parse(is);
-    NodeList nodes = dom.getElementsByTagName("JobTaskAttemptCounters");
+    NodeList nodes = dom.getElementsByTagName("jobTaskAttemptCounters");

     verifyHsTaskCountersXML(nodes, att);
   }


@@ -38,6 +38,7 @@ import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.mapred.*;
+import org.junit.Ignore;

 /**
  * Distributed i/o benchmark.
@@ -66,6 +67,7 @@ import org.apache.hadoop.mapred.*;
  * <li>standard i/o rate deviation</li>
  * </ul>
  */
+@Ignore
 public class DFSCIOTest extends TestCase {
   // Constants
   private static final Log LOG = LogFactory.getLog(DFSCIOTest.class);


@@ -41,6 +41,7 @@ import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.mapred.*;
+import org.junit.Ignore;

 /**
  * Distributed checkup of the file system consistency.
@@ -52,6 +53,7 @@ import org.apache.hadoop.mapred.*;
  * Optionally displays statistics on read performance.
  *
  */
+@Ignore
 public class DistributedFSCheck extends TestCase {
   // Constants
   private static final Log LOG = LogFactory.getLog(DistributedFSCheck.class);


@@ -35,10 +35,12 @@ import org.apache.hadoop.mapred.JobStatus;
 import org.apache.hadoop.mapred.MiniMRCluster;
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.net.StandardSocketFactory;
+import org.junit.Ignore;

 /**
  * This class checks that RPCs can use specialized socket factories.
  */
+@Ignore
 public class TestSocketFactory extends TestCase {

   /**


@@ -39,7 +39,8 @@ import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.TaskCounter;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.junit.Ignore;

+@Ignore
 public class TestBadRecords extends ClusterMapReduceTestCase {
   private static final Log LOG =


@@ -20,9 +20,12 @@ package org.apache.hadoop.mapred;

 import java.io.IOException;

+import org.junit.Ignore;
+
 /**
  * Tests Job end notification in cluster mode.
  */
+@Ignore
 public class TestClusterMRNotification extends NotificationTestCase {

   public TestClusterMRNotification() throws IOException {


@@ -21,10 +21,11 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
+import org.junit.Ignore;

 import java.io.*;
 import java.util.Properties;

+@Ignore
 public class TestClusterMapReduceTestCase extends ClusterMapReduceTestCase {
   public void _testMapReduce(boolean restart) throws Exception {
     OutputStream os = getFileSystem().create(new Path(getInputDir(), "text.txt"));


@@ -28,12 +28,13 @@ import org.apache.hadoop.fs.*;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.Ignore;

 /**
  * check for the job submission options of
  * -libjars -files -archives
  */
+@Ignore
 public class TestCommandLineJobSubmission extends TestCase {
   // Input output paths for this..
   // these are all dummy and does not test


@@ -36,12 +36,13 @@ import org.apache.hadoop.io.compress.*;
 import org.apache.hadoop.util.LineReader;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.junit.Ignore;
 import org.junit.Test;

 import static junit.framework.Assert.*;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;

+@Ignore
 public class TestConcatenatedCompressedInput {
   private static final Log LOG =
     LogFactory.getLog(TestConcatenatedCompressedInput.class.getName());

Some files were not shown because too many files have changed in this diff.