YARN-825. Fixed javadoc and annotations for yarn-common module. Contributed by Vinod Kumar Vavilapalli.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1493634 13f79535-47bb-0310-9956-ffa450edef68
Vinod Kumar Vavilapalli 2013-06-17 03:32:49 +00:00
parent ff70f912f7
commit f5f8f3bca4
143 changed files with 718 additions and 264 deletions


@ -387,6 +387,8 @@ Release 2.1.0-beta - UNRELEASED
YARN-752. In AMRMClient, automatically add corresponding rack requests for
requested nodes. (sandyr via tucu)
YARN-825. Fixed javadoc and annotations for yarn-common module. (vinodkv)
OPTIMIZATIONS
YARN-512. Log aggregation root directory check is more expensive than it


@ -18,7 +18,15 @@
package org.apache.hadoop.yarn;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
/**
* A simple clock interface that gives you time.
*/
@Public
@Stable
public interface Clock {
long getTime();
}

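With Clock now marked @Public/@Stable, callers can inject alternative time sources. A minimal sketch (illustration only, not part of this commit) of a manually driven Clock for deterministic tests of time-based components:

    public class ControlledClock implements Clock {
      private long time;

      public ControlledClock(long startTime) {
        this.time = startTime;
      }

      // Tests move time forward explicitly instead of sleeping.
      public synchronized void advance(long millis) {
        time += millis;
      }

      @Override
      public synchronized long getTime() {
        return time;
      }
    }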

@ -18,9 +18,11 @@
package org.apache.hadoop.yarn;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.Records;
@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
public class ClusterInfo {
private Resource maxContainerCapability;


@ -22,16 +22,17 @@ import java.io.File;
import java.util.LinkedList;
import java.util.Queue;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.log4j.FileAppender;
import org.apache.log4j.spi.LoggingEvent;
/**
* A simple log4j-appender for the task child's
* map-reduce system logs.
* A simple log4j-appender for container's logs.
*
*/
@InterfaceStability.Unstable
@Public
@Unstable
public class ContainerLogAppender extends FileAppender {
private String containerLogDir;
//so that log4j can configure it from the configuration(log4j.properties).

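The trailing comment refers to log4j's bean-style configuration, where log4j.properties assigns appender properties through setters. A hedged sketch of the equivalent programmatic wiring; the setContainerLogDir setter is assumed from the containerLogDir field above, and the path is hypothetical:

    public class AppenderSetup {
      public static void main(String[] args) {
        ContainerLogAppender appender = new ContainerLogAppender();
        appender.setContainerLogDir("/tmp/container-logs"); // assumed bean-style setter
        appender.setLayout(new org.apache.log4j.PatternLayout("%d{ISO8601} %p %c: %m%n"));
        appender.activateOptions(); // standard log4j FileAppender lifecycle call
        org.apache.log4j.Logger.getRootLogger().addAppender(appender);
      }
    }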

@ -17,6 +17,15 @@
*/
package org.apache.hadoop.yarn;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
/**
* Implementation of {@link Clock} that gives the current time from the system
* clock in milliseconds.
*/
@Public
@Stable
public class SystemClock implements Clock {
public long getTime() {


@ -22,6 +22,8 @@ import java.lang.Thread.UncaughtExceptionHandler;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.ShutdownHookManager;
@ -34,6 +36,8 @@ import org.apache.hadoop.util.ShutdownHookManager;
* Note: Right now it will only shut down the program if an Error is caught, but
* not any other exception. Anything else is just logged.
*/
@Public
@Evolving
public class YarnUncaughtExceptionHandler implements UncaughtExceptionHandler {
private static final Log LOG = LogFactory.getLog(YarnUncaughtExceptionHandler.class);

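A usage sketch (not part of this commit): a daemon installs the handler once, early in main(), so that an uncaught Throwable on any thread is logged and an Error takes the process down:

    public class MyDaemon {
      public static void main(String[] args) {
        // Must run before any worker threads are spawned.
        Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
        // ... create, init and start services ...
      }
    }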

@ -22,6 +22,7 @@ import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
@ -92,6 +93,7 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestPr
import com.google.protobuf.ServiceException;
@Private
public class ApplicationClientProtocolPBClientImpl implements ApplicationClientProtocol,
Closeable {


@ -22,6 +22,7 @@ import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
@ -47,6 +48,7 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterR
import com.google.protobuf.ServiceException;
@Private
public class ApplicationMasterProtocolPBClientImpl implements ApplicationMasterProtocol, Closeable {
private ApplicationMasterProtocolPB proxy;


@ -22,6 +22,7 @@ import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
@ -50,6 +51,7 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainerRequestProto;
import com.google.protobuf.ServiceException;
@Private
public class ContainerManagementProtocolPBClientImpl implements ContainerManagementProtocol,
Closeable {


@ -22,6 +22,7 @@ import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
@ -65,7 +66,7 @@ import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.Refre
import com.google.protobuf.ServiceException;
@Private
public class ResourceManagerAdministrationProtocolPBClientImpl implements ResourceManagerAdministrationProtocol, Closeable {
private ResourceManagerAdministrationProtocolPB proxy;


@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.api.impl.pb.service;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
@ -87,6 +88,7 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationResponseP
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@Private
public class ApplicationClientProtocolPBServiceImpl implements ApplicationClientProtocolPB {
private ApplicationClientProtocol real;


@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.api.impl.pb.service;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocolPB;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
@ -42,6 +43,7 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterR
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@Private
public class ApplicationMasterProtocolPBServiceImpl implements ApplicationMasterProtocolPB {
private ApplicationMasterProtocol real;


@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.api.impl.pb.service;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.api.ContainerManagementProtocolPB;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
@ -42,6 +43,7 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainerResponseProto
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@Private
public class ContainerManagementProtocolPBServiceImpl implements ContainerManagementProtocolPB {
private ContainerManagementProtocol real;


@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.api.impl.pb.service;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.yarn.api.ResourceManagerAdministrationProtocol;
import org.apache.hadoop.yarn.api.ResourceManagerAdministrationProtocolPB;
import org.apache.hadoop.yarn.api.protocolrecords.RefreshAdminAclsResponse;
@ -41,11 +42,25 @@ import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshSuperUserGroups
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsResponsePBImpl;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.*;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsResponseProto;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@Private
public class ResourceManagerAdministrationProtocolPBServiceImpl implements ResourceManagerAdministrationProtocolPB {
private ResourceManagerAdministrationProtocol real;


@ -18,9 +18,14 @@
package org.apache.hadoop.yarn.event;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* parent class of all the events. All events extend this class.
* Parent class of all the events. All events extend this class.
*/
@Public
@Evolving
public abstract class AbstractEvent<TYPE extends Enum<TYPE>>
implements Event<TYPE> {

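A minimal sketch of the intended extension pattern, using hypothetical names (JobEvent and JobEventType are illustrations, not YARN classes): a subclass pairs an enum of event types with whatever payload its handlers need.

    public enum JobEventType { JOB_START, JOB_FINISH }

    public class JobEvent extends AbstractEvent<JobEventType> {
      private final String jobId;

      public JobEvent(String jobId, JobEventType type) {
        super(type); // AbstractEvent keeps the type (and, via the two-arg constructor, a timestamp)
        this.jobId = jobId;
      }

      public String getJobId() {
        return jobId;
      }
    }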

@ -27,17 +27,21 @@ import java.util.concurrent.LinkedBlockingQueue;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.service.AbstractService;
/**
* Dispatches events in a separate thread. Currently only single thread does
* that. Potentially there could be multiple channels for each event type
* Dispatches {@link Event}s in a separate thread. Currently only single thread
* does that. Potentially there could be multiple channels for each event type
* class and a thread pool can be used to dispatch the events.
*/
@SuppressWarnings("rawtypes")
@Public
@Evolving
public class AsyncDispatcher extends AbstractService implements Dispatcher {
private static final Log LOG = LogFactory.getLog(AsyncDispatcher.class);

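A hedged end-to-end sketch of the contract described above (it also illustrates the Dispatcher and EventHandler interfaces that follow), reusing the hypothetical JobEvent/JobEventType from the previous sketch: handlers are registered per event-type class, and producers enqueue through the generic handler.

    public class DispatcherDemo {
      public static void main(String[] args) {
        AsyncDispatcher dispatcher = new AsyncDispatcher();
        dispatcher.init(new Configuration());
        dispatcher.start();

        // Every event whose type enum is JobEventType goes to this handler.
        dispatcher.register(JobEventType.class, new EventHandler<JobEvent>() {
          @Override
          public void handle(JobEvent event) {
            System.out.println(event.getType() + " for " + event.getJobId());
          }
        });

        // Producers enqueue here; the dispatcher thread delivers asynchronously.
        dispatcher.getEventHandler().handle(
            new JobEvent("job_0001", JobEventType.JOB_START));

        dispatcher.stop();
      }
    }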

@ -18,12 +18,17 @@
package org.apache.hadoop.yarn.event;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* Event Dispatcher interface. It dispatches events to registered
* event handlers based on event types.
*
*/
@SuppressWarnings("rawtypes")
@Public
@Evolving
public interface Dispatcher {
// Configuration to make sure dispatcher crashes but doesn't do system-exit in


@ -18,10 +18,15 @@
package org.apache.hadoop.yarn.event;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* Interface defining events api.
*
*/
@Public
@Evolving
public interface Event<TYPE extends Enum<TYPE>> {
TYPE getType();


@ -18,11 +18,17 @@
package org.apache.hadoop.yarn.event;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* Interface for handling events of type T
*
* @param <T> paremeterized event of type T
* @param <T> parameterized event of type T
*/
@SuppressWarnings("rawtypes")
@Public
@Evolving
public interface EventHandler<T extends Event> {
void handle(T event);


@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceAudience.Public
package org.apache.hadoop.yarn.event;
import org.apache.hadoop.classification.InterfaceAudience;


@ -20,8 +20,10 @@ package org.apache.hadoop.yarn.factories;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@InterfaceAudience.LimitedPrivate({ "MapReduce", "YARN" })
public interface RpcClientFactory {
public Object getClient(Class<?> protocol, long clientVersion,


@ -20,11 +20,13 @@ package org.apache.hadoop.yarn.factories;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier;
@InterfaceAudience.LimitedPrivate({ "MapReduce", "YARN" })
public interface RpcServerFactory {
public Server getServer(Class<?> protocol, Object instance,


@ -23,10 +23,12 @@ import java.lang.reflect.InvocationTargetException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
@Private
public class RecordFactoryPBImpl implements RecordFactory {
private static final String PB_IMPL_PACKAGE_SUFFIX = "impl.pb";


@ -27,10 +27,12 @@ import java.util.concurrent.ConcurrentMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RpcClientFactory;
@Private
public class RpcClientFactoryPBImpl implements RpcClientFactory {
private static final Log LOG = LogFactory


@ -28,6 +28,7 @@ import java.util.concurrent.ConcurrentMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.Server;
@ -39,6 +40,7 @@ import org.apache.hadoop.yarn.factories.RpcServerFactory;
import com.google.protobuf.BlockingService;
@Private
public class RpcServerFactoryPBImpl implements RpcServerFactory {
private static final Log LOG = LogFactory.getLog(RpcServerFactoryPBImpl.class);


@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceAudience.LimitedPrivate({ "MapReduce", "YARN" })
package org.apache.hadoop.yarn.factories;
import org.apache.hadoop.classification.InterfaceAudience;


@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.factory.providers;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@ -30,6 +31,7 @@ import org.apache.hadoop.yarn.factories.RpcServerFactory;
/**
* A public static get() method must be present in the Client/Server Factory implementation.
*/
@InterfaceAudience.LimitedPrivate({ "MapReduce", "YARN" })
public class RpcFactoryProvider {
private RpcFactoryProvider() {

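A hedged sketch of the lookup this class provides: the factory implementation class is read from configuration and its static get() method (the requirement stated in the javadoc above) is invoked reflectively. The accessor names below follow the YARN factory API as best I can tell; treat them as assumptions.

    public class FactoryLookup {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Defaults resolve to the protobuf implementations unless overridden.
        RpcClientFactory clientFactory = RpcFactoryProvider.getClientFactory(conf);
        RpcServerFactory serverFactory = RpcFactoryProvider.getServerFactory(conf);
        System.out.println(clientFactory.getClass() + " / " + serverFactory.getClass());
      }
    }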

@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceAudience.LimitedPrivate({ "MapReduce", "YARN" })
package org.apache.hadoop.yarn.factory.providers;
import org.apache.hadoop.classification.InterfaceAudience;


@ -22,6 +22,7 @@ import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.token.SecretManager;
@ -35,6 +36,7 @@ import org.apache.hadoop.yarn.factory.providers.RpcFactoryProvider;
* RPC wire format is non-standard, but it does permit use of Protocol Buffers
* protocol versioning features for inter-Java RPCs.
*/
@InterfaceAudience.LimitedPrivate({ "MapReduce", "YARN" })
public class HadoopYarnProtoRPC extends YarnRPC {
private static final Log LOG = LogFactory.getLog(HadoopYarnProtoRPC.class);


@ -22,11 +22,13 @@ import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import com.google.protobuf.ServiceException;
@InterfaceAudience.LimitedPrivate({ "MapReduce", "YARN" })
public class RPCUtil {
/**


@ -22,6 +22,7 @@ import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.token.SecretManager;
@ -32,6 +33,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
/**
* Abstraction to get the RPC implementation for Yarn.
*/
@InterfaceAudience.LimitedPrivate({ "MapReduce", "YARN" })
public abstract class YarnRPC {
private static final Log LOG = LogFactory.getLog(YarnRPC.class);

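A hedged client-side sketch of the abstraction: YarnRPC.create picks the configured implementation (HadoopYarnProtoRPC by default) and getProxy builds a protocol proxy over it. The address constants are the standard YarnConfiguration keys; treat the exact names as assumptions.

    public class RmClientSetup {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        YarnRPC rpc = YarnRPC.create(conf);

        InetSocketAddress rmAddress = conf.getSocketAddr(
            YarnConfiguration.RM_ADDRESS,
            YarnConfiguration.DEFAULT_RM_ADDRESS,
            YarnConfiguration.DEFAULT_RM_PORT);

        // A protobuf-backed proxy for the client-RM protocol.
        ApplicationClientProtocol client = (ApplicationClientProtocol)
            rpc.getProxy(ApplicationClientProtocol.class, rmAddress, conf);
      }
    }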

@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceAudience.LimitedPrivate({ "MapReduce", "YARN" })
package org.apache.hadoop.yarn.ipc;
import org.apache.hadoop.classification.InterfaceAudience;


@ -24,6 +24,7 @@ import java.util.TimerTask;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@ -32,6 +33,10 @@ import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.service.AbstractService;
/**
* A service that periodically deletes aggregated logs.
*/
@Private
public class AggregatedLogDeletionService extends AbstractService {
private static final Log LOG = LogFactory.getLog(AggregatedLogDeletionService.class);


@ -42,6 +42,9 @@ import java.util.Map.Entry;
import org.apache.commons.io.input.BoundedInputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
@ -60,9 +63,11 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.util.ConverterUtils;
@Public
@Evolving
public class AggregatedLogFormat {
static final Log LOG = LogFactory.getLog(AggregatedLogFormat.class);
private static final Log LOG = LogFactory.getLog(AggregatedLogFormat.class);
private static final LogKey APPLICATION_ACL_KEY = new LogKey("APPLICATION_ACL");
private static final LogKey APPLICATION_OWNER_KEY = new LogKey("APPLICATION_OWNER");
private static final LogKey VERSION_KEY = new LogKey("VERSION");
@ -84,7 +89,8 @@ public class AggregatedLogFormat {
RESERVED_KEYS.put(APPLICATION_OWNER_KEY.toString(), APPLICATION_OWNER_KEY);
RESERVED_KEYS.put(VERSION_KEY.toString(), VERSION_KEY);
}
@Public
public static class LogKey implements Writable {
private String keyString;
@ -118,11 +124,13 @@ public class AggregatedLogFormat {
return false;
}
@Private
@Override
public void write(DataOutput out) throws IOException {
out.writeUTF(this.keyString);
}
@Private
@Override
public void readFields(DataInput in) throws IOException {
this.keyString = in.readUTF();
@ -134,6 +142,7 @@ public class AggregatedLogFormat {
}
}
@Private
public static class LogValue {
private final List<String> rootLogDirs;
@ -207,6 +216,10 @@ public class AggregatedLogFormat {
}
}
/**
* The writer that writes out the aggregated logs.
*/
@Private
public static class LogWriter {
private final FSDataOutputStream fsDataOStream;
@ -295,6 +308,8 @@ public class AggregatedLogFormat {
}
}
@Public
@Evolving
public static class LogReader {
private final FSDataInputStream fsDataIStream;
@ -411,6 +426,7 @@ public class AggregatedLogFormat {
* logs could not be found
* @throws IOException
*/
@Private
public ContainerLogsReader getContainerLogsReader(
ContainerId containerId) throws IOException {
ContainerLogsReader logReader = null;
@ -559,6 +575,7 @@ public class AggregatedLogFormat {
}
}
@Private
public static class ContainerLogsReader {
private DataInputStream valueStream;
private String currentLogType = null;


@ -18,6 +18,12 @@
package org.apache.hadoop.yarn.logaggregation;
import org.apache.hadoop.classification.InterfaceAudience.Private;
@Private
/**
* This API is not exposed to end-users yet.
*/
public enum ContainerLogsRetentionPolicy {
APPLICATION_MASTER_ONLY, AM_AND_FAILED_CONTAINERS_ONLY, ALL_CONTAINERS
}


@ -18,12 +18,14 @@
package org.apache.hadoop.yarn.logaggregation;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@Private
public class LogAggregationUtils {
/**


@ -31,6 +31,9 @@ import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileContext;
@ -47,6 +50,10 @@ import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader;
import org.apache.hadoop.yarn.util.ConverterUtils;
import com.google.common.annotations.VisibleForTesting;
@Public
@Evolving
public class LogDumper extends Configured implements Tool {
private static final String CONTAINER_ID_OPTION = "containerId";
@ -133,6 +140,8 @@ public class LogDumper extends Configured implements Tool {
return resultCode;
}
@Private
@VisibleForTesting
public int dumpAContainersLogs(String appId, String containerId,
String nodeId, String jobOwner) throws IOException {
Path remoteRootLogDir =


@ -0,0 +1,21 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Public
package org.apache.hadoop.yarn.logaggregation;
import org.apache.hadoop.classification.InterfaceAudience;


@ -0,0 +1,21 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Public
package org.apache.hadoop.yarn;
import org.apache.hadoop.classification.InterfaceAudience;


@ -23,12 +23,14 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@Private
public class AdminACLsManager {
/**


@ -24,6 +24,8 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
@ -35,6 +37,8 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
* ApplicationTokenIdentifier is the TokenIdentifier to be used by
* ApplicationMasters to authenticate to the ResourceManager.
*/
@Public
@Evolving
public class ApplicationTokenIdentifier extends TokenIdentifier {
public static final Text KIND_NAME = new Text("YARN_APPLICATION_TOKEN");


@ -22,11 +22,15 @@ import java.util.Collection;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenSelector;
@Public
@Evolving
public class ApplicationTokenSelector implements
TokenSelector<ApplicationTokenIdentifier> {


@ -20,6 +20,8 @@ package org.apache.hadoop.yarn.security;
import java.lang.annotation.Annotation;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.SecurityInfo;
@ -28,6 +30,8 @@ import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.security.token.TokenSelector;
import org.apache.hadoop.yarn.api.ContainerManagementProtocolPB;
@Public
@Stable
public class ContainerManagerSecurityInfo extends SecurityInfo {
@Override


@ -25,6 +25,8 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
@ -39,6 +41,8 @@ import org.apache.hadoop.yarn.api.records.Resource;
* {@link Resource} needed by the container and the target NMs host-address.
*
*/
@Public
@Evolving
public class ContainerTokenIdentifier extends TokenIdentifier {
private static Log LOG = LogFactory.getLog(ContainerTokenIdentifier.class);


@ -22,11 +22,15 @@ import java.util.Collection;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenSelector;
@Public
@Stable
public class ContainerTokenSelector implements
TokenSelector<ContainerTokenIdentifier> {


@ -25,6 +25,8 @@ import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
@ -35,7 +37,8 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Token;
@Public
@Evolving
public class NMTokenIdentifier extends TokenIdentifier {
private static Log LOG = LogFactory.getLog(NMTokenIdentifier.class);


@ -20,6 +20,8 @@ package org.apache.hadoop.yarn.security;
import java.lang.annotation.Annotation;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.SecurityInfo;
@ -28,6 +30,8 @@ import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.security.token.TokenSelector;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocolPB;
@Public
@Stable
public class SchedulerSecurityInfo extends SecurityInfo {
@Override


@ -20,6 +20,8 @@ package org.apache.hadoop.yarn.security.admin;
import java.lang.annotation.Annotation;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.SecurityInfo;
@ -27,6 +29,8 @@ import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.yarn.api.ResourceManagerAdministrationProtocolPB;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@Public
@Stable
public class AdminSecurityInfo extends SecurityInfo {
@Override


@ -0,0 +1,21 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Public
package org.apache.hadoop.yarn.security.admin;
import org.apache.hadoop.classification.InterfaceAudience;


@ -20,15 +20,29 @@ package org.apache.hadoop.yarn.security.client;
import javax.crypto.SecretKey;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
/**
* A base {@link SecretManager} for AMs to extend and validate Client-RM tokens
* issued to clients by the RM using the underlying master-key shared by RM to
* the AMs on their launch. All the methods are called by either Hadoop RPC or
* YARN, so this class is strictly for the purpose of inherit/extend and
* register with Hadoop RPC.
*/
@Public
@Evolving
public abstract class BaseClientToAMTokenSecretManager extends
SecretManager<ClientTokenIdentifier> {
@Private
public abstract SecretKey getMasterKey(
ApplicationAttemptId applicationAttemptId);
@Private
@Override
public synchronized byte[] createPassword(
ClientTokenIdentifier identifier) {
@ -36,6 +50,7 @@ public abstract class BaseClientToAMTokenSecretManager extends
getMasterKey(identifier.getApplicationAttemptID()));
}
@Private
@Override
public byte[] retrievePassword(ClientTokenIdentifier identifier)
throws SecretManager.InvalidToken {
@ -46,6 +61,7 @@ public abstract class BaseClientToAMTokenSecretManager extends
return createPassword(identifier.getBytes(), masterKey);
}
@Private
@Override
public ClientTokenIdentifier createIdentifier() {
return new ClientTokenIdentifier();


@ -20,6 +20,8 @@ package org.apache.hadoop.yarn.security.client;
import java.lang.annotation.Annotation;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.SecurityInfo;
@ -29,6 +31,8 @@ import org.apache.hadoop.security.token.TokenSelector;
import org.apache.hadoop.yarn.api.ApplicationClientProtocolPB;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@Public
@Stable
public class ClientRMSecurityInfo extends SecurityInfo {
@Override


@ -20,9 +20,20 @@ package org.apache.hadoop.yarn.security.client;
import javax.crypto.SecretKey;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
/**
* A simple {@link SecretManager} for AMs to validate Client-RM tokens issued to
* clients by the RM using the underlying master-key shared by RM to the AMs on
* their launch. All the methods are called by either Hadoop RPC or YARN, so
* this class is strictly for the purpose of inherit/extend and register with
* Hadoop RPC.
*/
@Public
@Evolving
public class ClientToAMTokenSecretManager extends
BaseClientToAMTokenSecretManager {


@ -23,6 +23,8 @@ import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
@ -30,6 +32,8 @@ import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
@Public
@Evolving
public class ClientTokenIdentifier extends TokenIdentifier {
public static final Text KIND_NAME = new Text("YARN_CLIENT_TOKEN");


@ -22,11 +22,15 @@ import java.util.Collection;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenSelector;
@Public
@Stable
public class ClientTokenSelector implements
TokenSelector<ClientTokenIdentifier> {


@ -22,11 +22,15 @@ import java.util.Collection;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenSelector;
@Public
@Stable
public class RMDelegationTokenSelector implements
TokenSelector<RMDelegationTokenIdentifier> {


@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceAudience.Public
package org.apache.hadoop.yarn.security.client;
import org.apache.hadoop.classification.InterfaceAudience;


@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceAudience.Public
package org.apache.hadoop.yarn.security;
import org.apache.hadoop.classification.InterfaceAudience;


@ -0,0 +1,21 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.yarn.server.security;
import org.apache.hadoop.classification.InterfaceAudience;


@ -25,14 +25,19 @@ import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import com.google.common.annotations.VisibleForTesting;
/**
* This is the base implementation class for YARN services.
*/
@Public
@Evolving
public abstract class AbstractService implements Service {
private static final Log LOG = LogFactory.getLog(AbstractService.class);

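A hedged sketch of the extension pattern, assuming the serviceInit/serviceStart/serviceStop override points of the 2.1 service model (names are assumptions to the extent this branch differs):

    public class HeartbeatService extends AbstractService {
      private Thread worker;

      public HeartbeatService() {
        super("HeartbeatService"); // service name used in logs and state reporting
      }

      @Override
      protected void serviceStart() throws Exception {
        worker = new Thread(new Runnable() {
          @Override
          public void run() {
            // periodic work goes here
          }
        }, getName() + "-worker");
        worker.start();
        super.serviceStart();
      }

      @Override
      protected void serviceStop() throws Exception {
        if (worker != null) {
          worker.interrupt();
        }
        super.serviceStop();
      }
    }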

@ -24,12 +24,15 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
/**
* Composition of services.
*/
@Public
@Evolving
public class CompositeService extends AbstractService {
private static final Log LOG = LogFactory.getLog(CompositeService.class);

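Following on, a sketch of composition reusing the hypothetical HeartbeatService above: children added via addService share the parent's lifecycle, starting in registration order and stopping in reverse.

    public class ServerBundle extends CompositeService {
      public ServerBundle() {
        super("ServerBundle");
        addService(new HeartbeatService()); // child inits/starts/stops with the parent
      }

      public static void main(String[] args) {
        ServerBundle bundle = new ServerBundle();
        bundle.init(new Configuration());
        bundle.start();
        bundle.stop();
      }
    }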

@ -1,115 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.service;
import org.apache.hadoop.conf.Configuration;
import java.io.IOException;
import java.util.List;
import java.util.Map;
public class FilterService implements Service {
private final Service service;
private final long startTime = System.currentTimeMillis();
public FilterService(Service service) {
this.service = service;
}
@Override
public void init(Configuration config) {
service.init(config);
}
@Override
public void start() {
service.start();
}
@Override
public void stop() {
service.stop();
}
@Override
public void close() throws IOException {
service.close();
}
@Override
public void registerServiceListener(ServiceStateChangeListener listener) {
service.registerServiceListener(listener);
}
@Override
public void unregisterServiceListener(ServiceStateChangeListener listener) {
service.unregisterServiceListener(listener);
}
@Override
public String getName() {
return service.getName();
}
@Override
public Configuration getConfig() {
return service.getConfig();
}
@Override
public STATE getServiceState() {
return service.getServiceState();
}
@Override
public long getStartTime() {
return startTime;
}
@Override
public boolean isInState(STATE state) {
return service.isInState(state);
}
@Override
public Throwable getFailureCause() {
return service.getFailureCause();
}
@Override
public STATE getFailureState() {
return service.getFailureState();
}
@Override
public boolean waitForServiceToStop(long timeout) {
return service.waitForServiceToStop(timeout);
}
@Override
public List<LifecycleEvent> getLifecycleHistory() {
return service.getLifecycleHistory();
}
@Override
public Map<String, String> getBlockers() {
return service.getBlockers();
}
}


@ -19,11 +19,19 @@ package org.apache.hadoop.yarn.service;
import java.io.Serializable;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* A serializable lifecycle event: the time a state
* transition occurred, and what state was entered.
*/
@Public
@Evolving
public class LifecycleEvent implements Serializable {
private static final long serialVersionUID = 1648576996238247836L;
/**
* Local time in milliseconds when the event occurred
*/


@ -20,10 +20,14 @@ package org.apache.hadoop.yarn.service;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* This is a state change listener that logs events at INFO level
*/
@Public
@Evolving
public class LoggingStateChangeListener implements ServiceStateChangeListener {
private static final Log LOG = LogFactory.getLog(LoggingStateChangeListener.class);

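Usage is a one-liner; registerServiceListener is part of the Service interface (its full surface is visible in the removed FilterService later in this commit). A fragment, reusing the hypothetical ServerBundle sketched above:

    // Log every state transition of the service at INFO level.
    Service service = new ServerBundle();
    service.registerServiceListener(new LoggingStateChangeListener());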

@ -18,6 +18,8 @@
package org.apache.hadoop.yarn.service;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import java.io.Closeable;
@ -28,6 +30,8 @@ import java.util.Map;
/**
* Service LifeCycle.
*/
@Public
@Evolving
public interface Service extends Closeable {
/**


@ -18,19 +18,20 @@
package org.apache.hadoop.yarn.service;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ShutdownHookManager;
import java.lang.ref.WeakReference;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* This class contains a set of methods to work with services, especially
* to walk them through their lifecycle.
*/
@Public
@Evolving
public final class ServiceOperations {
private static final Log LOG = LogFactory.getLog(AbstractService.class);


@ -18,9 +18,14 @@
package org.apache.hadoop.yarn.service;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
/**
* Interface to notify state changes of a service.
*/
@Public
@Stable
public interface ServiceStateChangeListener {
/**


@ -18,14 +18,20 @@
package org.apache.hadoop.yarn.service;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
/**
* Exception that is raised on state change operations.
*/
@Public
@Evolving
public class ServiceStateException extends YarnRuntimeException {
private static final long serialVersionUID = 1110000352259232646L;
public ServiceStateException(String message) {
super(message);
}


@ -18,9 +18,14 @@
package org.apache.hadoop.yarn.service;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* Implements the service state model for YARN.
*/
@Public
@Evolving
public class ServiceStateModel {
/**


@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceAudience.Public
package org.apache.hadoop.yarn.service;
import org.apache.hadoop.classification.InterfaceAudience;


@ -18,11 +18,17 @@
package org.apache.hadoop.yarn.state;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@Public
@Evolving
public class InvalidStateTransitonException extends YarnRuntimeException {
private Enum<?> currentState;
private static final long serialVersionUID = 8610511635996283691L;
private Enum<?> currentState;
private Enum<?> event;
public InvalidStateTransitonException(Enum<?> currentState, Enum<?> event) {


@ -18,13 +18,16 @@
package org.apache.hadoop.yarn.state;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* Hook for Transition.
* Post state is decided by Transition hook. Post state must be one of the
* valid post states registered in StateMachine.
*/
@Public
@Evolving
public interface MultipleArcTransition
<OPERAND, EVENT, STATE extends Enum<STATE>> {


@ -18,11 +18,15 @@
package org.apache.hadoop.yarn.state;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* Hook for Transition. This leads the state machine to move to
* the post state as registered in the state machine.
*/
@Public
@Evolving
public interface SingleArcTransition<OPERAND, EVENT> {
/**
* Transition hook.


@ -18,6 +18,11 @@
package org.apache.hadoop.yarn.state;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
@Public
@Evolving
public interface StateMachine
<STATE extends Enum<STATE>,
EVENTTYPE extends Enum<EVENTTYPE>, EVENT> {


@ -26,6 +26,8 @@ import java.util.Map.Entry;
import java.util.Set;
import java.util.Stack;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.yarn.util.Graph;
/**
@ -40,6 +42,8 @@ import org.apache.hadoop.yarn.util.Graph;
* @param <EVENT> The event object.
*
*/
@Public
@Evolving
final public class StateMachineFactory
<OPERAND, STATE extends Enum<STATE>,
EVENTTYPE extends Enum<EVENTTYPE>, EVENT> {
@ -453,6 +457,7 @@ final public class StateMachineFactory
* @param name graph name
* @return Graph object generated
*/
@SuppressWarnings("rawtypes")
public Graph generateStateGraph(String name) {
maybeMakeStateMachineTable();
Graph g = new Graph(name);

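A hedged sketch tying this package together, with hypothetical Task names: a shared immutable factory declares the arcs (the hook-less addTransition overload used here is the trivial SingleArcTransition case), installTopology() finalizes the table, and each operand gets its own StateMachine.

    public class Task {
      public enum TaskState { NEW, RUNNING, DONE }
      public enum TaskEventType { T_START, T_FINISH }

      public static class TaskEvent extends AbstractEvent<TaskEventType> {
        public TaskEvent(TaskEventType type) {
          super(type);
        }
      }

      private static final StateMachineFactory<Task, TaskState, TaskEventType, TaskEvent>
          stateMachineFactory =
            new StateMachineFactory<Task, TaskState, TaskEventType, TaskEvent>(TaskState.NEW)
              .addTransition(TaskState.NEW, TaskState.RUNNING, TaskEventType.T_START)
              .addTransition(TaskState.RUNNING, TaskState.DONE, TaskEventType.T_FINISH)
              .installTopology();

      private final StateMachine<TaskState, TaskEventType, TaskEvent> stateMachine =
          stateMachineFactory.make(this);

      public void handle(TaskEvent event) {
        // Throws InvalidStateTransitonException for an unregistered arc.
        stateMachine.doTransition(event.getType(), event);
      }
    }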

@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceAudience.Public
package org.apache.hadoop.yarn.state;
import org.apache.hadoop.classification.InterfaceAudience;


@ -24,9 +24,18 @@ import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.yarn.Clock;
import org.apache.hadoop.yarn.service.AbstractService;
/**
* A simple liveliness monitor with which clients can register, trust the
* component to monitor liveliness, get a call-back on expiry and then finally
* unregister.
*/
@Public
@Evolving
public abstract class AbstractLivelinessMonitor<O> extends AbstractService {
private static final Log LOG = LogFactory.getLog(AbstractLivelinessMonitor.class);

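A hedged sketch of the register/ping/expire contract from the javadoc; the constructor and interval setters follow the YARN class as best I can tell, so treat the exact signatures as assumptions:

    public class WorkerMonitor extends AbstractLivelinessMonitor<String> {
      public WorkerMonitor(Clock clock) {
        super("WorkerMonitor", clock);
        setExpireInterval(10 * 60 * 1000); // declare dead after 10 minutes of silence
        setMonitorInterval(60 * 1000);     // sweep once a minute
      }

      @Override
      protected void expire(String workerId) {
        // The promised expiry call-back.
        System.err.println("Worker timed out: " + workerId);
      }
    }

    // Callers: register("worker-1") on join, receivedPing("worker-1") on each
    // heartbeat, unregister("worker-1") on clean exit.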

@ -18,9 +18,6 @@
package org.apache.hadoop.yarn.util;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Splitter;
import java.io.File;
import java.io.FilenameFilter;
import java.net.MalformedURLException;
@ -34,6 +31,9 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Splitter;
/**
* A {@link URLClassLoader} for YARN application isolation. Classes from
* the application JARs are loaded in preference to the parent loader.


@ -18,18 +18,24 @@
package org.apache.hadoop.yarn.util;
import static org.apache.hadoop.yarn.util.StringHelper._split;
import static org.apache.hadoop.yarn.util.StringHelper.join;
import static org.apache.hadoop.yarn.util.StringHelper.sjoin;
import java.util.Iterator;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.util.StringInterner;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import static org.apache.hadoop.yarn.util.StringHelper.*;
/**
* Yarn application related utilities
*/
@Private
public class Apps {
public static final String APP = "application";
public static final String ID = "ID";
@ -98,6 +104,8 @@ public class Apps {
private static final String SYSTEM_PATH_SEPARATOR =
System.getProperty("path.separator");
@Public
@Unstable
public static void addToEnvironment(
Map<String, String> environment,
String variable, String value) {


@ -27,6 +27,7 @@ import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
@ -42,6 +43,7 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
* from/to 'serializableFormat' to/from hadoop/nativejava data structures.
*
*/
@Private
public class ConverterUtils {
public static final String APPLICATION_PREFIX = "application";

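A hedged sketch of the string round-tripping this utility provides (the container ID below is a made-up example of the standard format):

    public class IdRoundTrip {
      public static void main(String[] args) {
        ContainerId cid =
            ConverterUtils.toContainerId("container_1371439000000_0001_01_000001");
        ApplicationId appId = cid.getApplicationAttemptId().getApplicationId();
        System.out.println(ConverterUtils.toString(appId)); // application_1371439000000_0001
      }
    }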

@ -28,6 +28,7 @@ import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
@ -46,6 +47,7 @@ import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
* Download a single URL to the local disk.
*
*/
@LimitedPrivate({"YARN", "MapReduce"})
public class FSDownload implements Callable<Path> {
private static final Log LOG = LogFactory.getLog(FSDownload.class);


@ -25,7 +25,9 @@ import java.util.List;
import java.util.Set;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.hadoop.classification.InterfaceAudience.Private;
@Private
public class Graph {
public class Edge {
Node from;


@ -18,12 +18,13 @@
package org.apache.hadoop.yarn.util;
import java.lang.reflect.Constructor;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.net.CachedDNSToSwitchMapping;
@ -35,6 +36,7 @@ import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.annotations.VisibleForTesting;
@InterfaceAudience.LimitedPrivate({"YARN", "MAPREDUCE"})
public class RackResolver {
private static DNSToSwitchMapping dnsToSwitchMapping;
private static boolean initCalled = false;
@ -104,6 +106,7 @@ public class RackResolver {
/**
* Only used by tests
*/
@Private
@VisibleForTesting
static DNSToSwitchMapping getDnsToSwitchMapping(){
return dnsToSwitchMapping;


@ -29,7 +29,7 @@ import org.apache.hadoop.util.Shell;
* Plugin to calculate resource information on the system.
*
*/
@InterfaceAudience.Private
@InterfaceAudience.LimitedPrivate({"YARN", "MAPREDUCE"})
@InterfaceStability.Unstable
public abstract class ResourceCalculatorPlugin extends Configured {


@ -18,17 +18,19 @@
package org.apache.hadoop.yarn.util;
import java.lang.reflect.Constructor;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.ReflectionUtils;
import java.lang.reflect.Constructor;
/**
* Interface class to obtain process resource usage
*
*/
@Private
public abstract class ResourceCalculatorProcessTree extends Configured {
static final Log LOG = LogFactory
.getLog(ResourceCalculatorProcessTree.class);


@ -1,55 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.util;
/**
* Some utilities for introspection
*/
public class Self {
private static boolean firstTime = true;
private static boolean isUnitTest = false;
private static boolean isJUnitTest = false;
public synchronized static boolean isUnitTest() {
detect();
return isUnitTest;
}
public synchronized static boolean isJUnitTest() {
detect();
return isJUnitTest;
}
private synchronized static void detect() {
if (!firstTime) {
return;
}
firstTime = false;
for (StackTraceElement e : new Throwable().getStackTrace()) {
String className = e.getClassName();
if (className.startsWith("org.junit")) {
isUnitTest = isJUnitTest = true;
return;
}
if (className.startsWith("org.apache.maven.surefire")) {
isUnitTest = true;
return;
}
}
}
}

View File

@@ -22,9 +22,12 @@ import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience.Private;
/**
* Common string manipulation helpers
*/
@Private
public final class StringHelper {
// Common joiners to avoid per join creation of joiners
public static final Joiner SSV_JOINER = Joiner.on(' ');
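A short sketch of these helpers; SSV_JOINER is shown in the hunk above, while
the join/ujoin statics are the ones Controller imports later in this diff (the
exact ujoin output is an assumption):

import static org.apache.hadoop.yarn.util.StringHelper.join;
import static org.apache.hadoop.yarn.util.StringHelper.ujoin;

import org.apache.hadoop.yarn.util.StringHelper;

public class StringHelperSketch {
  public static void main(String[] args) {
    String ssv = StringHelper.SSV_JOINER.join("alpha", "beta"); // "alpha beta"
    String title = join("Application: ", "application_1371439400001_0042");
    // ujoin is assumed to build a '/'-separated url fragment:
    String path = ujoin("apps", "application_1371439400001_0042");
    System.out.println(ssv + "\n" + title + "\n" + path);
  }
}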

View File

@@ -21,6 +21,9 @@ package org.apache.hadoop.yarn.util;
import java.text.SimpleDateFormat;
import java.util.Date;
import org.apache.hadoop.classification.InterfaceAudience.Private;
@Private
public class Times {
static final ThreadLocal<SimpleDateFormat> dateFormat =
new ThreadLocal<SimpleDateFormat>() {
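The ThreadLocal wrapper exists because SimpleDateFormat is not thread-safe:
each web-render thread gets its own formatter instance instead of sharing one.
A hedged sketch of the helpers this class is typically used for (both method
names are assumptions):

import org.apache.hadoop.yarn.util.Times;

public class TimesSketch {
  public static void main(String[] args) {
    long started = System.currentTimeMillis();
    long finished = started + 1500;
    System.out.println(Times.elapsed(started, finished)); // 1500
    System.out.println(Times.format(started)); // uses the per-thread format
  }
}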

View File

@@ -21,8 +21,10 @@ import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.yarn.state.StateMachineFactory;
@Private
public class VisualizeStateMachine {
/**

View File

@@ -24,11 +24,12 @@ import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.util.StringUtils;
@Private
public class WindowsBasedProcessTree extends ResourceCalculatorProcessTree {
static final Log LOG = LogFactory

View File

@@ -22,10 +22,12 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.util.StringUtils;
@Private
public class WindowsResourceCalculatorPlugin extends ResourceCalculatorPlugin {
static final Log LOG = LogFactory

View File

@@ -0,0 +1,21 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Public
package org.apache.hadoop.yarn.util;
import org.apache.hadoop.classification.InterfaceAudience;

View File

@@ -21,6 +21,9 @@ package org.apache.hadoop.yarn.webapp;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Response.Status;
import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
public class BadRequestException extends WebApplicationException {
private static final long serialVersionUID = 1L;

View File

@@ -18,26 +18,28 @@
package org.apache.hadoop.yarn.webapp;
import static org.apache.hadoop.yarn.util.StringHelper.join;
import static org.apache.hadoop.yarn.util.StringHelper.ujoin;
import java.io.PrintWriter;
import java.util.Map;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.yarn.webapp.view.DefaultPage;
import org.codehaus.jackson.map.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Maps;
import com.google.inject.Inject;
import com.google.inject.Injector;
import com.google.inject.servlet.RequestScoped;
import java.io.PrintWriter;
import java.util.Map;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import static org.apache.hadoop.yarn.util.StringHelper.*;
import org.apache.hadoop.yarn.webapp.view.DefaultPage;
import org.codehaus.jackson.map.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
public abstract class Controller implements Params {
public static final Logger LOG = LoggerFactory.getLogger(Controller.class);
static final ObjectMapper jsonMapper = new ObjectMapper();
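A hypothetical controller subclass; index() as the default action and the
renderText() helper are assumptions about this framework's Controller API:

import org.apache.hadoop.yarn.webapp.Controller;

public class HelloController extends Controller {
  @Override
  public void index() {
    // Serve the default route with a plain-text body.
    renderText("hello from the YARN webapp");
  }
}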

View File

@@ -27,13 +27,18 @@ import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import com.google.inject.Singleton;
@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
@Singleton
public class DefaultWrapperServlet extends HttpServlet {
private static final long serialVersionUID = 1L;
@Private
public void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
RequestDispatcher rd = getServletContext().getNamedDispatcher("default");

View File

@@ -18,33 +18,36 @@
package org.apache.hadoop.yarn.webapp;
import static com.google.common.base.Preconditions.*;
import com.google.common.collect.Iterables;
import com.google.inject.Inject;
import com.google.inject.Injector;
import com.google.inject.Singleton;
import static com.google.common.base.Preconditions.checkState;
import java.io.IOException;
import java.util.Timer;
import java.util.TimerTask;
import javax.servlet.ServletException;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.http.HtmlQuoting;
import org.apache.hadoop.yarn.webapp.Controller.RequestContext;
import org.apache.hadoop.yarn.webapp.Router.Dest;
import org.apache.hadoop.yarn.webapp.view.ErrorPage;
import org.apache.hadoop.http.HtmlQuoting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Iterables;
import com.google.inject.Inject;
import com.google.inject.Injector;
import com.google.inject.Singleton;
/**
* The servlet that dispatches requests to the various controllers
* according to the user-defined routes in the router.
*/
@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
@Singleton
public class Dispatcher extends HttpServlet {
private static final long serialVersionUID = 1L;
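Routes are declared in a WebApp subclass and the Dispatcher then consults the
Router to pick a controller and action. A sketch wiring up the hypothetical
HelloController from above; the route() overloads are assumptions about this
framework:

import org.apache.hadoop.yarn.webapp.WebApp;

public class HelloWebApp extends WebApp {
  @Override
  public void setup() {
    route("/", HelloController.class); // dispatched to index()
    route("/hello/:id", HelloController.class, "show"); // named action
  }
}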

View File

@@ -28,6 +28,7 @@ import javax.ws.rs.ext.Provider;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.authorize.AuthorizationException;
@@ -37,6 +38,7 @@ import com.google.inject.Singleton;
* Handles Jersey web-service exceptions and creates a JSON or XML response
* with the ExceptionData.
*/
@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
@Singleton
@Provider
public class GenericExceptionHandler implements ExceptionMapper<Exception> {

View File

@@ -18,6 +18,9 @@
package org.apache.hadoop.yarn.webapp;
import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
public interface MimeType {
public static final String TEXT = "text/plain; charset=UTF-8";

View File

@@ -21,11 +21,14 @@ package org.apache.hadoop.yarn.webapp;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Response.Status;
import org.apache.hadoop.classification.InterfaceAudience;
/*
* We created our own NotFoundException because com.sun.jersey.api.NotFoundException
* sets the Response itself and therefore won't be handled by the
* GenericExceptionHandler to fill in the correct response.
*/
@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
public class NotFoundException extends WebApplicationException {
private static final long serialVersionUID = 1L;
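A sketch of the intended use inside a web-service method; the String
constructor is an assumption based on how YARN web services raise 404s:

import org.apache.hadoop.yarn.webapp.NotFoundException;

public class NotFoundSketch {
  // Throwing this exception lets GenericExceptionHandler (earlier in this
  // diff) build the JSON/XML error body with a 404 status.
  static <T> T checkFound(T resource, String id) {
    if (resource == null) {
      throw new NotFoundException("resource with id " + id + " not found");
    }
    return resource;
  }
}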

View File

@@ -18,11 +18,14 @@
package org.apache.hadoop.yarn.webapp;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Public static constants for webapp parameters. Do NOT put any
* private or application-specific constants here, as they're part of
* the API for users of the controllers and views.
*/
@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
public interface Params {
static final String TITLE = "title";
static final String TITLE_LINK = "title.href";

View File

@@ -22,6 +22,8 @@ import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Contains the exception information from an exception thrown
* by the web service REST APIs.
@@ -31,6 +33,7 @@ import javax.xml.bind.annotation.XmlRootElement;
* message - a detailed message explaining the exception
*
*/
@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
@XmlRootElement(name = "RemoteException")
@XmlAccessorType(XmlAccessType.FIELD)
public class RemoteExceptionData {

Some files were not shown because too many files have changed in this diff.