| org.apache.hadoop.conf.Configuration.addDeprecation(String, String[]) | 
| org.apache.hadoop.conf.Configuration.addDeprecation(String, String[], String) | 
| org.apache.hadoop.filecache.DistributedCache.addLocalArchives(Configuration, String) | 
| org.apache.hadoop.filecache.DistributedCache.addLocalFiles(Configuration, String) | 
| org.apache.hadoop.ipc.Server.call(Writable, long) | 
| org.apache.hadoop.mapred.JobClient.cancelDelegationToken(Token) | 
| org.apache.hadoop.mapreduce.Cluster.cancelDelegationToken(Token) | 
| org.apache.hadoop.io.IOUtils.cleanup(Log, Closeable...) | 
| org.apache.hadoop.mapred.FileOutputCommitter.cleanupJob(JobContext) | 
| org.apache.hadoop.mapred.OutputCommitter.cleanupJob(JobContext) | 
| org.apache.hadoop.mapred.OutputCommitter.cleanupJob(org.apache.hadoop.mapreduce.JobContext) | 
| org.apache.hadoop.mapreduce.OutputCommitter.cleanupJob(JobContext) | 
| org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.cleanupJob(JobContext) | 
| org.apache.hadoop.io.WritableUtils.cloneInto(Writable, Writable) use ReflectionUtils.cloneInto instead. | 
| org.apache.hadoop.util.ReflectionUtils.cloneWritableInto(Writable, Writable) | 
| org.apache.hadoop.mapred.Counters.Counter.contentEquals(Counters.Counter) | 
| org.apache.hadoop.fs.FileUtil.copyMerge(FileSystem, Path, FileSystem, Path, boolean, Configuration, String) | 
| org.apache.hadoop.filecache.DistributedCache.createAllSymlink(Configuration, File, File) Internal to MapReduce framework. Use DistributedCacheManager instead. | 
| org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.createAMRMClientAsync(AMRMClient, int, AMRMClientAsync.CallbackHandler) | 
| org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.createAMRMClientAsync(int, AMRMClientAsync.CallbackHandler) | 
| org.apache.hadoop.yarn.client.api.async.NMClientAsync.createNMClientAsync(NMClientAsync.CallbackHandler) | 
| org.apache.hadoop.fs.adl.AdlFileSystem.createNonRecursive(Path, FsPermission, EnumSet, int, short, long, Progressable) API only for 0.20-append | 
| org.apache.hadoop.mapred.lib.CombineFileInputFormat.createPool(JobConf, List) | 
| org.apache.hadoop.mapred.lib.CombineFileInputFormat.createPool(JobConf, PathFilter...) | 
| org.apache.hadoop.mapreduce.Job.createSymlink() | 
| org.apache.hadoop.mapreduce.lib.db.DBRecordReader.createValue() | 
| org.apache.hadoop.io.SequenceFile.createWriter(Configuration, FSDataOutputStream, Class, Class, SequenceFile.CompressionType, CompressionCodec) | 
| org.apache.hadoop.io.SequenceFile.createWriter(Configuration, FSDataOutputStream, Class, Class, SequenceFile.CompressionType, CompressionCodec, SequenceFile.Metadata) | 
| org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class) | 
| org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, int, short, long, boolean, SequenceFile.CompressionType, CompressionCodec, SequenceFile.Metadata) | 
| org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, int, short, long, SequenceFile.CompressionType, CompressionCodec, Progressable, SequenceFile.Metadata) | 
| org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType) | 
| org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType, CompressionCodec) | 
| org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType, CompressionCodec, Progressable) | 
| org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType, CompressionCodec, Progressable, SequenceFile.Metadata) | 
| org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType, Progressable) | 
| org.apache.hadoop.fs.FileSystem.delete(Path) | 
| org.apache.hadoop.fs.azure.NativeAzureFileSystem.delete(Path) | 
| org.apache.hadoop.mapred.JobConf.deleteLocalFiles() | 
| org.apache.hadoop.mapred.Counters.findCounter(String, int, String) | 
| org.apache.hadoop.fs.FileUtil.fullyDelete(FileSystem, Path) | 
| org.apache.hadoop.io.BytesWritable.get() | 
| org.apache.hadoop.mapreduce.Cluster.getAllJobs() | 
| org.apache.hadoop.fs.FileSystem.getAllStatistics() | 
| org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext.getAMContainerResourceRequest() | 
| org.apache.hadoop.fs.FileSystem.getBlockSize(Path) | 
| org.apache.hadoop.fs.adl.AdlFileSystem.getBlockSize(Path) Use getFileStatus() instead (see the migration sketch after this table) | 
| org.apache.hadoop.mapred.Counters.Group.getCounter(int, String) | 
| org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree.getCumulativeRssmem() | 
| org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree.getCumulativeRssmem(int) | 
| org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree.getCumulativeVmem() | 
| org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree.getCumulativeVmem(int) | 
| org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse.getDecreasedContainers() | 
| org.apache.hadoop.fs.FileSystem.getDefaultBlockSize() | 
| org.apache.hadoop.fs.adl.AdlFileSystem.getDefaultBlockSize() | 
| org.apache.hadoop.fs.FileSystem.getDefaultReplication() | 
| org.apache.hadoop.filecache.DistributedCache.getFileStatus(Configuration, URI) | 
| org.apache.hadoop.mapred.ClusterStatus.getGraylistedTrackerNames() | 
| org.apache.hadoop.mapred.ClusterStatus.getGraylistedTrackers() | 
| org.apache.hadoop.yarn.api.records.ContainerId.getId() | 
| org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse.getIncreasedContainers() | 
| org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest.getIncreaseRequests() | 
| org.apache.hadoop.mapreduce.Job.getInstance(Cluster) | 
| org.apache.hadoop.mapreduce.Job.getInstance(Cluster, Configuration) | 
| org.apache.hadoop.fs.TrashPolicy.getInstance(Configuration, FileSystem, Path) | 
| org.apache.hadoop.mapred.JobClient.getJob(String) | 
| org.apache.hadoop.mapred.JobStatus.getJobId() use getJobID instead | 
| org.apache.hadoop.mapred.RunningJob.getJobID() This method is deprecated and will be removed. Applications should use RunningJob.getID() instead. | 
| org.apache.hadoop.mapred.JobID.getJobIDsPattern(String, Integer) | 
| org.apache.hadoop.mapred.ClusterStatus.getJobTrackerState() | 
| org.apache.hadoop.fs.FileSystem.getLength(Path) | 
| org.apache.hadoop.mapreduce.JobContext.getLocalCacheArchives() the array returned only includes the items that were downloaded. There is no way to map this to what is returned by JobContext.getCacheArchives(). | 
| org.apache.hadoop.mapreduce.JobContext.getLocalCacheFiles() the array returned only includes the items that were downloaded. There is no way to map this to what is returned by JobContext.getCacheFiles(). | 
| org.apache.hadoop.mapred.JobClient.getMapTaskReports(String) | 
| org.apache.hadoop.mapred.ClusterStatus.getMaxMemory() | 
| org.apache.hadoop.mapred.JobConf.getMaxPhysicalMemoryForTask() this variable is deprecated and no longer in use. | 
| org.apache.hadoop.mapred.JobConf.getMaxVirtualMemoryForTask() | 
| org.apache.hadoop.yarn.api.records.Resource.getMemory() | 
| org.apache.hadoop.fs.FileSystem.getName() | 
| org.apache.hadoop.fs.FileSystem.getNamed(String, Configuration) | 
| org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse.getNodeLabels() | 
| org.apache.hadoop.mapred.lib.TotalOrderPartitioner.getPartitionFile(JobConf) | 
| org.apache.hadoop.yarn.util.ConverterUtils.getPathFromYarnURL(URL) | 
| org.apache.hadoop.mapreduce.lib.db.DBRecordReader.getPos() | 
| org.apache.hadoop.mapred.JobQueueInfo.getQueueState() | 
| org.apache.hadoop.mapred.JobClient.getReduceTaskReports(String) | 
| org.apache.hadoop.fs.FileSystem.getReplication(Path) | 
| org.apache.hadoop.fs.adl.AdlFileSystem.getReplication(Path) Use getFileStatus() instead | 
| org.apache.hadoop.fs.AbstractFileSystem.getServerDefaults() | 
| org.apache.hadoop.fs.FileSystem.getServerDefaults() | 
| org.apache.hadoop.fs.viewfs.ViewFs.getServerDefaults() | 
| org.apache.hadoop.mapred.JobConf.getSessionId() | 
| org.apache.hadoop.io.BytesWritable.getSize() | 
| org.apache.hadoop.fs.FileSystem.getStatistics() | 
| org.apache.hadoop.fs.FileSystem.getStatistics(String, Class&lt;? extends FileSystem&gt;) | 
| org.apache.hadoop.mapreduce.JobContext.getSymlink() | 
| org.apache.hadoop.mapred.TaskAttemptID.getTaskAttemptIDsPattern(String, Integer, Boolean, Integer, Integer) | 
| org.apache.hadoop.mapred.TaskAttemptID.getTaskAttemptIDsPattern(String, Integer, TaskType, Integer, Integer) | 
| org.apache.hadoop.mapred.TaskCompletionEvent.getTaskId() | 
| org.apache.hadoop.mapred.TaskID.getTaskIDsPattern(String, Integer, Boolean, Integer) | 
| org.apache.hadoop.mapred.TaskID.getTaskIDsPattern(String, Integer, TaskType, Integer) | 
| org.apache.hadoop.mapred.JobClient.getTaskOutputFilter() | 
| org.apache.hadoop.ipc.Client.getTimeout(Configuration) | 
| org.apache.hadoop.filecache.DistributedCache.getTimestamp(Configuration, URI) | 
| org.apache.hadoop.mapred.ClusterStatus.getUsedMemory() | 
| org.apache.hadoop.yarn.util.ConverterUtils.getYarnUrlFromPath(Path) | 
| org.apache.hadoop.yarn.util.ConverterUtils.getYarnUrlFromURI(URI) | 
| org.apache.hadoop.yarn.client.api.NMClient.increaseContainerResource(Container) | 
| org.apache.hadoop.yarn.client.api.async.NMClientAsync.increaseContainerResourceAsync(Container) | 
| org.apache.hadoop.yarn.api.ContainerManagementProtocol.increaseContainersResource(IncreaseContainersResourceRequest) | 
| org.apache.hadoop.fs.TrashPolicy.initialize(Configuration, FileSystem, Path) | 
| org.apache.hadoop.fs.FileStatus.isDir() Use isDirectory() instead (see the sketch after this table) | 
| org.apache.hadoop.util.Shell.isJava7OrAbove() This call isn't needed any more: please remove uses of it. | 
| org.apache.hadoop.mapreduce.TaskID.isMap() | 
| org.apache.hadoop.mapreduce.TaskAttemptID.isMap() | 
| org.apache.hadoop.mapred.FileOutputCommitter.isRecoverySupported() | 
| org.apache.hadoop.mapred.OutputCommitter.isRecoverySupported() | 
| org.apache.hadoop.mapreduce.OutputCommitter.isRecoverySupported() | 
| org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.isRecoverySupported() | 
| org.apache.hadoop.mapred.RunningJob.killTask(String, boolean) | 
| org.apache.hadoop.fs.Path.makeQualified(FileSystem) | 
| org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest.newInstance(int, float, List, List, ResourceBlacklistRequest, List) | 
| org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse.newInstance(int, List, List, List, Resource, AMCommand, int, PreemptionMessage, List, List, List, CollectorInfo) | 
| org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse.newInstance(Set) | 
| org.apache.hadoop.mapreduce.lib.db.DBRecordReader.next(LongWritable, T) | 
| org.apache.hadoop.fs.FileSystem.primitiveCreate(Path, FsPermission, EnumSet, int, short, long, Progressable, Options.ChecksumOpt) | 
| org.apache.hadoop.fs.FileSystem.primitiveMkdir(Path, FsPermission) | 
| org.apache.hadoop.fs.FileSystem.primitiveMkdir(Path, FsPermission, boolean) | 
| org.apache.hadoop.mapred.JobID.read(DataInput) | 
| org.apache.hadoop.mapred.TaskAttemptID.read(DataInput) | 
| org.apache.hadoop.mapred.TaskID.read(DataInput) | 
| org.apache.hadoop.fs.FileSystem.rename(Path, Path, Options.Rename...) | 
| org.apache.hadoop.fs.adl.AdlFileSystem.rename(Path, Path, Options.Rename...) | 
| org.apache.hadoop.mapred.JobClient.renewDelegationToken(Token) | 
| org.apache.hadoop.mapreduce.Cluster.renewDelegationToken(Token) | 
| org.apache.hadoop.yarn.client.api.AMRMClient.requestContainerResourceChange(Container, Resource) | 
| org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.requestContainerResourceChange(Container, Resource) | 
| org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext.setAMContainerResourceRequest(ResourceRequest) | 
| org.apache.hadoop.filecache.DistributedCache.setArchiveTimestamps(Configuration, String) | 
| org.apache.hadoop.mapred.jobcontrol.Job.setAssignedJobID(JobID) setAssignedJobID should not be called. JOBID is set by the framework. | 
| org.apache.hadoop.mapreduce.Counter.setDisplayName(String) (and no-op by default) | 
| org.apache.hadoop.filecache.DistributedCache.setFileTimestamps(Configuration, String) | 
| org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest.setIncreaseRequests(List) | 
| org.apache.hadoop.filecache.DistributedCache.setLocalArchives(Configuration, String) | 
| org.apache.hadoop.filecache.DistributedCache.setLocalFiles(Configuration, String) | 
| org.apache.hadoop.mapred.jobcontrol.Job.setMapredJobID(String) | 
| org.apache.hadoop.mapred.JobConf.setMaxPhysicalMemoryForTask(long) | 
| org.apache.hadoop.mapred.JobConf.setMaxVirtualMemoryForTask(long) | 
| org.apache.hadoop.yarn.api.records.Resource.setMemory(int) | 
| org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse.setNodeLabels(Set) | 
| org.apache.hadoop.mapred.lib.TotalOrderPartitioner.setPartitionFile(JobConf, Path) | 
| org.apache.hadoop.mapred.JobConf.setSessionId(String) | 
| org.apache.hadoop.mapred.jobcontrol.Job.setState(int) | 
| org.apache.hadoop.mapred.TaskCompletionEvent.setTaskId(String) | 
| org.apache.hadoop.mapred.TaskCompletionEvent.setTaskID(TaskAttemptID) | 
| org.apache.hadoop.mapred.JobClient.setTaskOutputFilter(JobClient.TaskStatusFilter) | 
| org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL.setUseQueryStringForDelegationToken(boolean) | 
| org.apache.hadoop.mapred.Counters.size() | 
| org.apache.hadoop.mapred.pipes.Submitter.submitJob(JobConf) | 
| org.apache.hadoop.fs.Syncable.sync() As of HADOOP 0.21.0, replaced by hflush (see the sketch after this table) | 
| org.apache.hadoop.fs.FSDataOutputStream.sync() | 
| org.apache.hadoop.yarn.util.ConverterUtils.toApplicationAttemptId(String) | 
| org.apache.hadoop.yarn.util.ConverterUtils.toApplicationId(RecordFactory, String) | 
| org.apache.hadoop.yarn.util.ConverterUtils.toApplicationId(String) | 
| org.apache.hadoop.yarn.util.ConverterUtils.toContainerId(String) | 
| org.apache.hadoop.yarn.util.ConverterUtils.toNodeId(String) | 
| org.apache.hadoop.yarn.util.ConverterUtils.toString(ApplicationId) | 
| org.apache.hadoop.yarn.util.ConverterUtils.toString(ContainerId) |
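
Several FileSystem rows above (getBlockSize(Path), getReplication(Path), getLength(Path), and their AdlFileSystem counterparts) point to getFileStatus() as the replacement. A minimal migration sketch, assuming a default-configured FileSystem; the path is illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileStatusMigration {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/tmp/example.dat"); // hypothetical path

    // Deprecated: fs.getBlockSize(p), fs.getReplication(p), fs.getLength(p)
    // Replacement: fetch the FileStatus once and read all three fields from it.
    FileStatus st = fs.getFileStatus(p);
    long blockSize = st.getBlockSize();
    short replication = st.getReplication();
    long length = st.getLen();

    System.out.printf("block=%d repl=%d len=%d%n", blockSize, replication, length);
  }
}
```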
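The Syncable.sync() and FSDataOutputStream.sync() rows are replaced by hflush() as of Hadoop 0.21.0 (hsync() is the stronger durability variant). A sketch of the rewrite, with an illustrative output path:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HflushMigration {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    try (FSDataOutputStream out = fs.create(new Path("/tmp/example.log"))) { // hypothetical path
      out.writeBytes("record\n");
      // Deprecated: out.sync()
      // Replacement: hflush() pushes buffered data to the datanodes;
      // hsync() additionally asks them to persist it to disk.
      out.hflush();
    }
  }
}
```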
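Likewise, FileStatus.isDir() and the single-argument FileSystem.delete(Path) have direct replacements: isDirectory() and delete(Path, boolean). A sketch under the same assumptions:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteMigration {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/tmp/example-dir"); // hypothetical path

    // Deprecated: st.isDir()  ->  st.isDirectory()
    FileStatus st = fs.getFileStatus(p);
    boolean recursive = st.isDirectory();

    // Deprecated: fs.delete(p)  ->  fs.delete(p, recursive)
    // The boolean argument makes recursive deletion an explicit choice.
    fs.delete(p, recursive);
  }
}
```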