org.apache.hadoop.conf.Configuration.addDeprecation(String, String[]) |
org.apache.hadoop.conf.Configuration.addDeprecation(String, String[], String) |
org.apache.hadoop.filecache.DistributedCache.addLocalArchives(Configuration, String) |
org.apache.hadoop.filecache.DistributedCache.addLocalFiles(Configuration, String) |
org.apache.hadoop.ipc.Server.call(Writable, long) |
org.apache.hadoop.mapreduce.Cluster.cancelDelegationToken(Token<DelegationTokenIdentifier>) |
org.apache.hadoop.mapred.JobClient.cancelDelegationToken(Token<DelegationTokenIdentifier>) |
org.apache.hadoop.mapreduce.OutputCommitter.cleanupJob(JobContext) |
org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.cleanupJob(JobContext) |
org.apache.hadoop.mapred.FileOutputCommitter.cleanupJob(JobContext) |
org.apache.hadoop.mapred.OutputCommitter.cleanupJob(JobContext) |
org.apache.hadoop.io.WritableUtils.cloneInto(Writable, Writable) |
org.apache.hadoop.util.ReflectionUtils.cloneWritableInto(Writable, Writable) |
org.apache.hadoop.mapred.Counters.Counter.contentEquals(Counters.Counter) |
org.apache.hadoop.filecache.DistributedCache.createAllSymlink(Configuration, File, File) |
org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.createAMRMClientAsync(AMRMClient<T>, int, AMRMClientAsync.CallbackHandler) |
org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.createAMRMClientAsync(int, AMRMClientAsync.CallbackHandler) |
org.apache.hadoop.hdfs.client.HdfsAdmin.createEncryptionZone(Path, String) |
org.apache.hadoop.yarn.client.api.async.NMClientAsync.createNMClientAsync(NMClientAsync.CallbackHandler) |
org.apache.hadoop.fs.adl.AdlFileSystem.createNonRecursive(Path, FsPermission, EnumSet<CreateFlag>, int, short, long, Progressable) |
org.apache.hadoop.mapred.lib.CombineFileInputFormat.createPool(JobConf, List<PathFilter>) |
org.apache.hadoop.mapred.lib.CombineFileInputFormat.createPool(JobConf, PathFilter...) |
org.apache.hadoop.mapreduce.Job.createSymlink() |
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.createValue() |
org.apache.hadoop.io.SequenceFile.createWriter(Configuration, FSDataOutputStream, Class, Class, SequenceFile.CompressionType, CompressionCodec) |
org.apache.hadoop.io.SequenceFile.createWriter(Configuration, FSDataOutputStream, Class, Class, SequenceFile.CompressionType, CompressionCodec, SequenceFile.Metadata) |
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class) |
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, int, short, long, boolean, SequenceFile.CompressionType, CompressionCodec, SequenceFile.Metadata) |
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, int, short, long, SequenceFile.CompressionType, CompressionCodec, Progressable, SequenceFile.Metadata) |
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType) |
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType, CompressionCodec) |
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType, CompressionCodec, Progressable) |
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType, CompressionCodec, Progressable, SequenceFile.Metadata) |
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType, Progressable) |
org.apache.hadoop.fs.FileSystem.delete(Path) |
org.apache.hadoop.fs.azure.NativeAzureFileSystem.delete(Path) |
org.apache.hadoop.mapred.JobConf.deleteLocalFiles() |
org.apache.hadoop.mapred.Counters.findCounter(String, int, String) |
org.apache.hadoop.fs.FileUtil.fullyDelete(FileSystem, Path) |
org.apache.hadoop.io.BytesWritable.get() |
org.apache.hadoop.fs.permission.FsPermission.getAclBit() |
org.apache.hadoop.mapreduce.Cluster.getAllJobs() |
org.apache.hadoop.fs.FileSystem.getAllStatistics() |
org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext.getAMContainerResourceRequest() |
org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.getAttributes(String, INodeAttributes) |
org.apache.hadoop.fs.FileSystem.getBlockSize(Path) |
org.apache.hadoop.fs.adl.AdlFileSystem.getBlockSize(Path) |
org.apache.hadoop.mapred.Counters.Group.getCounter(int, String) |
org.apache.hadoop.fs.FileSystem.getDefaultBlockSize() |
org.apache.hadoop.fs.adl.AdlFileSystem.getDefaultBlockSize() |
org.apache.hadoop.fs.FileSystem.getDefaultReplication() |
org.apache.hadoop.fs.permission.FsPermission.getEncryptedBit() |
org.apache.hadoop.fs.permission.FsPermission.getErasureCodedBit() |
org.apache.hadoop.filecache.DistributedCache.getFileStatus(Configuration, URI) |
org.apache.hadoop.mapred.ClusterStatus.getGraylistedTrackerNames() |
org.apache.hadoop.mapred.ClusterStatus.getGraylistedTrackers() |
org.apache.hadoop.security.UserGroupInformation.getGroups() |
org.apache.hadoop.yarn.api.records.ContainerId.getId() |
org.apache.hadoop.mapreduce.Job.getInstance(Cluster) |
org.apache.hadoop.mapreduce.Job.getInstance(Cluster, Configuration) |
org.apache.hadoop.fs.TrashPolicy.getInstance(Configuration, FileSystem, Path) |
org.apache.hadoop.mapred.JobClient.getJob(String) |
org.apache.hadoop.mapred.JobStatus.getJobId() |
org.apache.hadoop.mapred.RunningJob.getJobID() |
org.apache.hadoop.mapred.JobID.getJobIDsPattern(String, Integer) |
org.apache.hadoop.mapred.ClusterStatus.getJobTrackerState() |
org.apache.hadoop.fs.FileSystem.getLength(Path) |
org.apache.hadoop.mapreduce.JobContext.getLocalCacheArchives() |
org.apache.hadoop.mapreduce.JobContext.getLocalCacheFiles() |
org.apache.hadoop.mapred.JobClient.getMapTaskReports(String) |
org.apache.hadoop.mapred.ClusterStatus.getMaxMemory() |
org.apache.hadoop.mapred.JobConf.getMaxPhysicalMemoryForTask() |
org.apache.hadoop.mapred.JobConf.getMaxVirtualMemoryForTask() |
org.apache.hadoop.yarn.api.records.Resource.getMemory() |
org.apache.hadoop.fs.FileSystem.getName() |
org.apache.hadoop.fs.FileSystem.getNamed(String, Configuration) |
org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse.getNodeLabels() |
org.apache.hadoop.mapred.lib.TotalOrderPartitioner.getPartitionFile(JobConf) |
org.apache.hadoop.yarn.util.ConverterUtils.getPathFromYarnURL(URL) |
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.getPos() |
org.apache.hadoop.mapred.JobQueueInfo.getQueueState() |
org.apache.hadoop.mapred.JobClient.getReduceTaskReports(String) |
org.apache.hadoop.fs.FileSystem.getReplication(Path) |
org.apache.hadoop.fs.adl.AdlFileSystem.getReplication(Path) |
org.apache.hadoop.fs.FileSystem.getServerDefaults() |
org.apache.hadoop.fs.AbstractFileSystem.getServerDefaults() |
org.apache.hadoop.fs.viewfs.ViewFs.getServerDefaults() |
org.apache.hadoop.mapred.JobConf.getSessionId() |
org.apache.hadoop.io.BytesWritable.getSize() |
org.apache.hadoop.fs.FileSystem.getStatistics() |
org.apache.hadoop.fs.FileSystem.getStatistics(String, Class<? extends FileSystem>) |
org.apache.hadoop.mapreduce.JobContext.getSymlink() |
org.apache.hadoop.mapred.TaskAttemptID.getTaskAttemptIDsPattern(String, Integer, Boolean, Integer, Integer) |
org.apache.hadoop.mapred.TaskAttemptID.getTaskAttemptIDsPattern(String, Integer, TaskType, Integer, Integer) |
org.apache.hadoop.mapred.TaskCompletionEvent.getTaskId() |
org.apache.hadoop.mapred.TaskID.getTaskIDsPattern(String, Integer, Boolean, Integer) |
org.apache.hadoop.mapred.TaskID.getTaskIDsPattern(String, Integer, TaskType, Integer) |
org.apache.hadoop.mapred.JobClient.getTaskOutputFilter() |
org.apache.hadoop.ipc.Client.getTimeout(Configuration) |
org.apache.hadoop.filecache.DistributedCache.getTimestamp(Configuration, URI) |
org.apache.hadoop.mapred.ClusterStatus.getUsedMemory() |
org.apache.hadoop.yarn.util.ConverterUtils.getYarnUrlFromPath(Path) |
org.apache.hadoop.yarn.util.ConverterUtils.getYarnUrlFromURI(URI) |
org.apache.hadoop.yarn.client.api.NMClient.increaseContainerResource(Container) |
org.apache.hadoop.yarn.client.api.async.NMClientAsync.increaseContainerResourceAsync(Container) |
org.apache.hadoop.yarn.api.ContainerManagementProtocol.increaseContainersResource(IncreaseContainersResourceRequest) |
org.apache.hadoop.fs.TrashPolicy.initialize(Configuration, FileSystem, Path) |
org.apache.hadoop.fs.FileStatus.isDir() |
org.apache.hadoop.fs.FileSystem.isDirectory(Path) |
org.apache.hadoop.fs.FileSystem.isFile(Path) |
org.apache.hadoop.util.Shell.isJava7OrAbove() |
org.apache.hadoop.mapreduce.TaskAttemptID.isMap() |
org.apache.hadoop.mapreduce.TaskID.isMap() |
org.apache.hadoop.mapreduce.OutputCommitter.isRecoverySupported() |
org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.isRecoverySupported() |
org.apache.hadoop.mapred.FileOutputCommitter.isRecoverySupported() |
org.apache.hadoop.mapred.OutputCommitter.isRecoverySupported() |
org.apache.hadoop.mapred.RunningJob.killTask(String, boolean) |
org.apache.hadoop.hdfs.client.HdfsAdmin.listOpenFiles() |
org.apache.hadoop.hdfs.client.HdfsAdmin.listOpenFiles(EnumSet<OpenFilesIterator.OpenFilesType>) |
org.apache.hadoop.util.ReflectionUtils.logThreadInfo(Log, String, long) |
org.apache.hadoop.fs.Path.makeQualified(FileSystem) |
org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse.newInstance(Set<String>) |
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.next(LongWritable, T) |
org.apache.hadoop.fs.FileSystem.primitiveCreate(Path, FsPermission, EnumSet<CreateFlag>, int, short, long, Progressable, Options.ChecksumOpt) |
org.apache.hadoop.fs.FileSystem.primitiveMkdir(Path, FsPermission) |
org.apache.hadoop.fs.FileSystem.primitiveMkdir(Path, FsPermission, boolean) |
org.apache.hadoop.mapred.TaskAttemptID.read(DataInput) |
org.apache.hadoop.mapred.TaskID.read(DataInput) |
org.apache.hadoop.mapred.JobID.read(DataInput) |
org.apache.hadoop.fs.FileStatus.readFields(DataInput) |
org.apache.hadoop.fs.permission.FsPermission.readFields(DataInput) |
org.apache.hadoop.fs.FileSystem.rename(Path, Path, Options.Rename...) |
org.apache.hadoop.fs.adl.AdlFileSystem.rename(Path, Path, Options.Rename...) |
org.apache.hadoop.mapreduce.Cluster.renewDelegationToken(Token<DelegationTokenIdentifier>) |
org.apache.hadoop.mapred.JobClient.renewDelegationToken(Token<DelegationTokenIdentifier>) |
org.apache.hadoop.yarn.client.api.AMRMClient.requestContainerResourceChange(Container, Resource) |
org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.requestContainerResourceChange(Container, Resource) |
org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext.setAMContainerResourceRequest(ResourceRequest) |
org.apache.hadoop.filecache.DistributedCache.setArchiveTimestamps(Configuration, String) |
org.apache.hadoop.mapred.jobcontrol.Job.setAssignedJobID(JobID) |
org.apache.hadoop.mapreduce.Counter.setDisplayName(String) |
org.apache.hadoop.filecache.DistributedCache.setFileTimestamps(Configuration, String) |
org.apache.hadoop.filecache.DistributedCache.setLocalArchives(Configuration, String) |
org.apache.hadoop.filecache.DistributedCache.setLocalFiles(Configuration, String) |
org.apache.hadoop.mapred.jobcontrol.Job.setMapredJobID(String) |
org.apache.hadoop.mapred.JobConf.setMaxPhysicalMemoryForTask(long) |
org.apache.hadoop.mapred.JobConf.setMaxVirtualMemoryForTask(long) |
org.apache.hadoop.yarn.api.records.Resource.setMemory(int) |
org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse.setNodeLabels(Set<String>) |
org.apache.hadoop.mapred.lib.TotalOrderPartitioner.setPartitionFile(JobConf, Path) |
org.apache.hadoop.mapred.JobConf.setSessionId(String) |
org.apache.hadoop.mapred.jobcontrol.Job.setState(int) |
org.apache.hadoop.mapred.TaskCompletionEvent.setTaskId(String) |
org.apache.hadoop.mapred.TaskCompletionEvent.setTaskID(TaskAttemptID) |
org.apache.hadoop.mapred.JobClient.setTaskOutputFilter(JobClient.TaskStatusFilter) |
org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL.setUseQueryStringForDelegationToken(boolean) |
org.apache.hadoop.mapred.Counters.size() |
org.apache.hadoop.service.ServiceOperations.stopQuietly(Log, Service) |
org.apache.hadoop.mapred.pipes.Submitter.submitJob(JobConf) |
org.apache.hadoop.yarn.util.ConverterUtils.toApplicationAttemptId(String) |
org.apache.hadoop.yarn.util.ConverterUtils.toApplicationId(RecordFactory, String) |
org.apache.hadoop.yarn.util.ConverterUtils.toApplicationId(String) |
org.apache.hadoop.yarn.util.ConverterUtils.toContainerId(String) |
org.apache.hadoop.fs.permission.FsPermission.toExtendedShort() |
org.apache.hadoop.yarn.util.ConverterUtils.toNodeId(String) |
org.apache.hadoop.yarn.util.ConverterUtils.toString(ApplicationId) |
org.apache.hadoop.yarn.util.ConverterUtils.toString(ContainerId) |
org.apache.hadoop.fs.FileStatus.write(DataOutput) |
org.apache.hadoop.fs.permission.FsPermission.write(DataOutput) |
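
The `cleanupJob(JobContext)` rows near the top of the table reflect the split of end-of-job cleanup into `commitJob` (success path) and `abortJob` (failure path). A minimal sketch of a custom committer built on the replacements; the class name is hypothetical and the remaining overrides are stubbed only so it compiles:

```java
import java.io.IOException;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Hypothetical committer: end-of-job logic lives in commitJob/abortJob,
// not in the deprecated cleanupJob(JobContext).
public class NoOpAuditingCommitter extends OutputCommitter {
  @Override public void setupJob(JobContext context) throws IOException { }
  @Override public void setupTask(TaskAttemptContext context) throws IOException { }
  @Override public boolean needsTaskCommit(TaskAttemptContext context) { return false; }
  @Override public void commitTask(TaskAttemptContext context) throws IOException { }
  @Override public void abortTask(TaskAttemptContext context) throws IOException { }

  @Override
  public void commitJob(JobContext context) throws IOException {
    // Success path: replaces the success half of cleanupJob().
    super.commitJob(context);
  }

  @Override
  public void abortJob(JobContext context, JobStatus.State state) throws IOException {
    // Failure/kill path: replaces the failure half of cleanupJob().
    super.abortJob(context, state);
  }
}
```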
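
The many `SequenceFile.createWriter(FileSystem, Configuration, Path, ...)` overloads were superseded by the single option-based factory `SequenceFile.createWriter(Configuration, Writer.Option...)`. A minimal sketch; the output path and the `Text`/`IntWritable` key and value types are illustrative choices, not anything the table prescribes:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.DefaultCodec;

public class SequenceFileMigration {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path out = new Path("/tmp/data.seq"); // illustrative path

    // Deprecated: SequenceFile.createWriter(fs, conf, out, Text.class, ...).
    // Replacement: pass file, key/value classes, and compression as options.
    try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
        SequenceFile.Writer.file(out),
        SequenceFile.Writer.keyClass(Text.class),
        SequenceFile.Writer.valueClass(IntWritable.class),
        SequenceFile.Writer.compression(
            SequenceFile.CompressionType.BLOCK, new DefaultCodec()))) {
      writer.append(new Text("key"), new IntWritable(1));
    }
  }
}
```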
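
Several `FileSystem` entries (`delete(Path)`, `isFile(Path)`, `isDirectory(Path)`, `getLength(Path)`, `getReplication(Path)`, `getBlockSize(Path)`, `getName()`), plus `FileStatus.isDir()` and `Path.makeQualified(FileSystem)`, migrate to the explicit `delete(Path, boolean)` overload and to per-file `FileStatus` accessors. A minimal sketch; the path is illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemMigration {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path p = new Path("/tmp/example-dir"); // illustrative path
    fs.mkdirs(p);

    // Deprecated: fs.isFile(p), fs.isDirectory(p), fs.getLength(p),
    // fs.getBlockSize(p), fs.getReplication(p).
    // Replacement: fetch one FileStatus and query it.
    FileStatus st = fs.getFileStatus(p);
    boolean isDir = st.isDirectory();   // also replaces FileStatus.isDir()
    boolean isFile = st.isFile();
    long length = st.getLen();
    long blockSize = st.getBlockSize();
    short replication = st.getReplication();

    // Deprecated: p.makeQualified(fs).
    // Replacement: pass the URI and working directory explicitly.
    Path qualified = p.makeQualified(fs.getUri(), fs.getWorkingDirectory());

    // Deprecated: fs.getName() / FileSystem.getNamed(name, conf).
    // Replacement: identify filesystems by URI.
    System.out.println(fs.getUri() + " " + qualified + " " + isDir + " "
        + isFile + " " + length + " " + blockSize + " " + replication);

    // Deprecated: fs.delete(p), which recursed implicitly.
    // Replacement: state the recursion flag explicitly.
    fs.delete(p, true);
  }
}
```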
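
`BytesWritable.get()` and `getSize()` are straight renames to `getBytes()` and `getLength()`. A minimal sketch; the byte values are illustrative:

```java
import org.apache.hadoop.io.BytesWritable;

public class BytesWritableMigration {
  public static void main(String[] args) {
    BytesWritable bw = new BytesWritable(new byte[] {1, 2, 3});

    // Deprecated: bw.get() / bw.getSize().
    // Replacement: getBytes()/getLength(). Note getBytes() returns the
    // backing array, which may be longer than getLength(); copyBytes()
    // returns an exact-length copy when that matters.
    byte[] backing = bw.getBytes();
    int len = bw.getLength();
    byte[] exact = bw.copyBytes();
    System.out.println(len + " " + backing.length + " " + exact.length);
  }
}
```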
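
`Resource.getMemory()`/`setMemory(int)` gave way to `long`-valued accessors once YARN memory values could outgrow `int` megabytes. A minimal sketch; the sizes and vcore counts are illustrative:

```java
import org.apache.hadoop.yarn.api.records.Resource;

public class ResourceMigration {
  public static void main(String[] args) {
    Resource r = Resource.newInstance(4096, 2); // 4096 MB, 2 vcores

    // Deprecated: r.setMemory(int) / r.getMemory().
    // Replacement: the long-valued accessors.
    r.setMemorySize(8192L);
    long mb = r.getMemorySize();
    System.out.println(mb + " MB, " + r.getVirtualCores() + " vcores");
  }
}
```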
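
The `ConverterUtils` string converters at the bottom of the table were superseded by `fromString` factories (and plain `toString()`) on the YARN record classes themselves. A minimal sketch; the ID strings are made up for illustration:

```java
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;

public class ConverterUtilsMigration {
  public static void main(String[] args) {
    // Deprecated: ConverterUtils.toApplicationId(s), toContainerId(s),
    // toNodeId(s), toApplicationAttemptId(s).
    // Replacement: the fromString factory on each record type.
    ApplicationId appId =
        ApplicationId.fromString("application_1487000000000_0001");
    ApplicationAttemptId attemptId =
        ApplicationAttemptId.fromString("appattempt_1487000000000_0001_000001");
    ContainerId containerId =
        ContainerId.fromString("container_1487000000000_0001_01_000001");
    NodeId nodeId = NodeId.fromString("host.example.com:45454");

    // Deprecated: ConverterUtils.toString(appId) / toString(containerId).
    // Replacement: plain toString(), which round-trips with fromString().
    System.out.println(appId + " " + attemptId + " " + containerId + " " + nodeId);
  }
}
```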