Deprecated Methods
org.apache.hadoop.filecache.DistributedCache.addArchiveToClassPath(Path, Configuration)
          Please use DistributedCache.addArchiveToClassPath(Path, Configuration, FileSystem) instead. The FileSystem should be obtained within an appropriate doAs.
org.apache.hadoop.filecache.DistributedCache.addFileToClassPath(Path, Configuration)
          Please use DistributedCache.addFileToClassPath(Path, Configuration, FileSystem) instead. The FileSystem should be obtained within an appropriate doAs.
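
As a minimal sketch of the recommended pattern (the cache path and the choice of the current user's context are illustrative assumptions, not part of the original note), the FileSystem is obtained inside a doAs block and passed to the new overloads:

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.filecache.DistributedCache;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.UserGroupInformation;

    public class CacheClassPathExample {
      public static void main(String[] args) throws Exception {
        final Configuration conf = new Configuration();
        Path cachePath = new Path("/apps/libs/myjob.jar"); // illustrative path

        // Obtain the FileSystem within an appropriate doAs, as the note requests;
        // here the current user is assumed to be the appropriate one.
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
          public FileSystem run() throws Exception {
            return FileSystem.get(conf);
          }
        });

        // The replacement overloads take the FileSystem explicitly.
        DistributedCache.addFileToClassPath(cachePath, conf, fs);
        DistributedCache.addArchiveToClassPath(cachePath, conf, fs);
      }
    }
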
org.apache.hadoop.http.HttpServer.addInternalServlet(String, String, Class<? extends HttpServlet>)
          This is a temporary method.
org.apache.hadoop.http.HttpServer.addSslListener(InetSocketAddress, String, String, String)
          Use HttpServer.addSslListener(InetSocketAddress, Configuration, boolean).
org.apache.hadoop.fs.FsShell.byteDesc(long)
          Consider using StringUtils.byteDesc(long) instead.
org.apache.hadoop.ipc.RPC.call(Method, Object[][], InetSocketAddress[], Configuration)
          Use RPC.call(Method, Object[][], InetSocketAddress[], UserGroupInformation, Configuration) instead.
org.apache.hadoop.ipc.Client.call(Writable[], InetSocketAddress[])
          Use Client.call(Writable[], InetSocketAddress[], Class<?>, UserGroupInformation, Configuration) instead.
org.apache.hadoop.ipc.Client.call(Writable[], InetSocketAddress[], Class<?>, UserGroupInformation)
          Use Client.call(Writable[], InetSocketAddress[], Class<?>, UserGroupInformation, Configuration) instead.
org.apache.hadoop.ipc.Client.call(Writable, InetSocketAddress)
          Use Client.call(Writable, ConnectionId) instead.
org.apache.hadoop.ipc.Client.call(Writable, InetSocketAddress, Class<?>, UserGroupInformation, int)
          Use Client.call(Writable, ConnectionId) instead.
org.apache.hadoop.ipc.Client.call(Writable, InetSocketAddress, UserGroupInformation)
          Use Client.call(Writable, ConnectionId) instead.
org.apache.hadoop.ipc.Server.call(Writable, long)
          Use Server.call(Class, Writable, long) instead.
org.apache.hadoop.mapred.TaskLog.captureOutAndError(List, List, File, File, long, boolean, String)
          pidFiles are no longer used. Instead, the pid is exported to the environment variable JVM_PID.
org.apache.hadoop.mapred.TaskLog.captureOutAndError(List, List, File, File, long, String)
          pidFiles are no longer used. Instead, the pid is exported to the environment variable JVM_PID.
org.apache.hadoop.mapred.FileOutputCommitter.cleanupJob(JobContext)
org.apache.hadoop.mapred.OutputCommitter.cleanupJob(JobContext)
          Use OutputCommitter.commitJob(JobContext) or OutputCommitter.abortJob(JobContext, int) instead.
org.apache.hadoop.mapreduce.OutputCommitter.cleanupJob(JobContext)
          Use OutputCommitter.commitJob(JobContext) or OutputCommitter.abortJob(JobContext, JobStatus.State) instead.
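
A minimal sketch of where the per-job cleanup moves under the new API (the committer class is hypothetical; only the job-level methods matter here):

    import java.io.IOException;

    import org.apache.hadoop.mapreduce.JobContext;
    import org.apache.hadoop.mapreduce.JobStatus;
    import org.apache.hadoop.mapreduce.OutputCommitter;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;

    public class MyCommitter extends OutputCommitter {
      @Override
      public void commitJob(JobContext context) throws IOException {
        // Called once when the whole job succeeds; replaces the success path of cleanupJob.
      }

      @Override
      public void abortJob(JobContext context, JobStatus.State state) throws IOException {
        // Called once when the job fails or is killed; replaces the failure path of cleanupJob.
      }

      // The remaining abstract methods still have to be implemented.
      @Override public void setupJob(JobContext context) throws IOException {}
      @Override public void setupTask(TaskAttemptContext context) throws IOException {}
      @Override public boolean needsTaskCommit(TaskAttemptContext context) throws IOException { return false; }
      @Override public void commitTask(TaskAttemptContext context) throws IOException {}
      @Override public void abortTask(TaskAttemptContext context) throws IOException {}
    }
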
org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.cleanupJob(JobContext)
org.apache.hadoop.io.WritableUtils.cloneInto(Writable, Writable)
          Use ReflectionUtils.cloneInto instead.
org.apache.hadoop.util.ReflectionUtils.cloneWritableInto(Writable, Writable)
org.apache.hadoop.filecache.DistributedCache.createAllSymlink(Configuration, File, File)
          Internal to MapReduce framework. Use DistributedCacheManager instead.
org.apache.hadoop.fs.FileSystem.createNonRecursive(Path, boolean, int, short, long, Progressable)
          API only for 0.20-append.
org.apache.hadoop.fs.FileSystem.createNonRecursive(Path, FsPermission, boolean, int, short, long, Progressable)
          API only for 0.20-append.
org.apache.hadoop.io.file.tfile.TFile.Reader.createScanner(byte[], byte[])
          Use TFile.Reader.createScannerByKey(byte[], byte[]) instead.
org.apache.hadoop.io.file.tfile.TFile.Reader.createScanner(RawComparable, RawComparable)
          Use TFile.Reader.createScannerByKey(RawComparable, RawComparable) instead.
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.createValue()
org.apache.hadoop.fs.FilterFileSystem.delete(Path)
org.apache.hadoop.fs.RawLocalFileSystem.delete(Path)
org.apache.hadoop.fs.FileSystem.delete(Path)
          Use delete(Path, boolean) instead.
org.apache.hadoop.fs.ftp.FTPFileSystem.delete(Path)
          Use delete(Path, boolean) instead.
org.apache.hadoop.fs.kfs.KosmosFileSystem.delete(Path)
org.apache.hadoop.fs.s3.S3FileSystem.delete(Path)
org.apache.hadoop.fs.s3native.NativeS3FileSystem.delete(Path)
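
The replacement delete takes an explicit recursive flag; a minimal sketch (the path is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class DeleteExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path dir = new Path("/tmp/scratch"); // illustrative path

        // Old: fs.delete(dir);
        // New: say explicitly whether a non-empty directory may be removed.
        boolean deleted = fs.delete(dir, true); // true = recursive
        System.out.println("deleted: " + deleted);
      }
    }
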
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findByte(byte[], int, int, byte)
          Use org.apache.hadoop.util.UTF8ByteArrayUtils.findByte(byte[], int, int, byte).
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findBytes(byte[], int, int, byte[])
          Use org.apache.hadoop.util.UTF8ByteArrayUtils.findBytes(byte[], int, int, byte[]).
org.apache.hadoop.mapred.Counters.findCounter(String, int, String)
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findNthByte(byte[], byte, int)
          Use org.apache.hadoop.util.UTF8ByteArrayUtils.findNthByte(byte[], byte, int).
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findNthByte(byte[], int, int, byte, int)
          Use org.apache.hadoop.util.UTF8ByteArrayUtils.findNthByte(byte[], int, int, byte, int).
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findTab(byte[])
          Use StreamKeyValUtil.findTab(byte[]).
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findTab(byte[], int, int)
          Use StreamKeyValUtil.findTab(byte[], int, int).
org.apache.hadoop.fs.FileUtil.fullyDelete(FileSystem, Path)
          Use FileSystem.delete(Path, boolean).
org.apache.hadoop.io.BytesWritable.get()
          Use BytesWritable.getBytes() instead.
org.apache.hadoop.fs.FileSystem.getBlockSize(Path)
          Use getFileStatus() instead.
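
Several entries in this list (getBlockSize, getLength, getReplication, isDirectory) share the same replacement; a minimal sketch of reading those attributes from a single getFileStatus call (the path is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FileStatusExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/user/data/part-00000"); // illustrative path

        FileStatus status = fs.getFileStatus(p);     // one call instead of one per attribute
        long length = status.getLen();               // replaces getLength(Path)
        long blockSize = status.getBlockSize();      // replaces getBlockSize(Path)
        short replication = status.getReplication(); // replaces getReplication(Path)
        boolean isDir = status.isDir();              // replaces isDirectory(Path)

        System.out.println(length + " " + blockSize + " " + replication + " " + isDir);
      }
    }
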
org.apache.hadoop.streaming.StreamJob.getClusterNick()
org.apache.hadoop.mapred.JobTracker.getClusterStatus()
          Use JobTracker.getClusterStatus(boolean).
org.apache.hadoop.io.SequenceFile.getCompressionType(Configuration)
          Use SequenceFileOutputFormat.getOutputCompressionType(org.apache.hadoop.mapred.JobConf) to get the SequenceFile.CompressionType for job outputs.
org.apache.hadoop.mapred.Counters.Group.getCounter(int, String)
          Use Counters.Group.getCounter(String) instead.
org.apache.hadoop.fs.FileSystem.getDefaultBlockSize()
          Use FileSystem.getDefaultBlockSize(Path) instead.
org.apache.hadoop.fs.FileSystem.getDefaultReplication()
          Use FileSystem.getDefaultReplication(Path) instead.
org.apache.hadoop.mapred.JobClient.getJob(String)
          Applications should rather use JobClient.getJob(JobID).
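
A minimal sketch of the JobID-based lookup (the job id string is illustrative); JobID.forName parses the old string form:

    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.JobID;
    import org.apache.hadoop.mapred.RunningJob;

    public class GetJobExample {
      public static void main(String[] args) throws Exception {
        JobClient client = new JobClient(new JobConf());

        // Old: client.getJob("job_201201011200_0001");
        JobID id = JobID.forName("job_201201011200_0001"); // illustrative job id
        RunningJob job = client.getJob(id);
        if (job != null) {
          System.out.println("state: " + job.getJobState());
        }
      }
    }
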
org.apache.hadoop.mapred.JobStatus.getJobId()
          Use getJobID instead.
org.apache.hadoop.mapred.JobProfile.getJobId()
          Use getJobID() instead.
org.apache.hadoop.mapred.RunningJob.getJobID()
          This method is deprecated and will be removed. Applications should rather use RunningJob.getID().
org.apache.hadoop.mapred.JobID.getJobIDsPattern(String, Integer)
org.apache.hadoop.fs.FileSystem.getLength(Path)
          Use getFileStatus() instead.
org.apache.hadoop.fs.kfs.KosmosFileSystem.getLength(Path)
org.apache.hadoop.mapred.jobcontrol.Job.getMapredJobID()
          Use Job.getAssignedJobID() instead.
org.apache.hadoop.mapred.JobClient.getMapTaskReports(String)
          Applications should rather use JobClient.getMapTaskReports(JobID).
org.apache.hadoop.mapred.ClusterStatus.getMaxMemory()
org.apache.hadoop.mapred.JobConf.getMaxPhysicalMemoryForTask()
          This variable is deprecated and no longer in use.
org.apache.hadoop.mapred.JobConf.getMaxVirtualMemoryForTask()
          Use JobConf.getMemoryForMapTask() and JobConf.getMemoryForReduceTask().
org.apache.hadoop.fs.FilterFileSystem.getName()
          Call getUri() instead.
org.apache.hadoop.fs.FileSystem.getName()
          Call getUri() instead.
org.apache.hadoop.fs.kfs.KosmosFileSystem.getName()
org.apache.hadoop.fs.FileSystem.getNamed(String, Configuration)
          Call get(URI, Configuration) instead.
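
A minimal sketch of the URI-based replacements for getNamed(String, Configuration) and getName() (the namenode address is illustrative):

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class FsByUriExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // Old: FileSystem.getNamed("hdfs://namenode:8020", conf);
        FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf); // illustrative URI

        // Old: fs.getName();
        System.out.println(fs.getUri());
      }
    }
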
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.getPos()
org.apache.hadoop.mapred.JobClient.getReduceTaskReports(String)
          Applications should rather use JobClient.getReduceTaskReports(JobID).
org.apache.hadoop.fs.FileSystem.getReplication(Path)
          Use getFileStatus() instead.
org.apache.hadoop.fs.kfs.KosmosFileSystem.getReplication(Path)
org.apache.hadoop.net.NetUtils.getServerAddress(Configuration, String, String, String)
org.apache.hadoop.io.BytesWritable.getSize()
          Use BytesWritable.getLength() instead.
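
Because getBytes() may return a backing array that is longer than the valid data, the replacement methods are used as a pair; a minimal sketch:

    import org.apache.hadoop.io.BytesWritable;

    public class BytesWritableExample {
      public static void main(String[] args) {
        BytesWritable bw = new BytesWritable(new byte[] {1, 2, 3});

        // Old: bw.get() / bw.getSize()
        byte[] backing = bw.getBytes(); // may be longer than the valid region
        int valid = bw.getLength();     // number of valid bytes

        for (int i = 0; i < valid; i++) {
          System.out.println(backing[i]);
        }
      }
    }
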
org.apache.hadoop.fs.FileSystem.getStatistics()
          Use FileSystem.getAllStatistics() instead.
org.apache.hadoop.mapred.TaskAttemptID.getTaskAttemptIDsPattern(String, Integer, Boolean, Integer, Integer)
org.apache.hadoop.mapred.TaskCompletionEvent.getTaskId()
          Use TaskCompletionEvent.getTaskAttemptId() instead.
org.apache.hadoop.mapred.TaskReport.getTaskId()
          Use TaskReport.getTaskID() instead.
org.apache.hadoop.mapred.TaskID.getTaskIDsPattern(String, Integer, Boolean, Integer)
org.apache.hadoop.mapred.JobClient.getTaskOutputFilter()
org.apache.hadoop.util.Shell.getUlimitMemoryCommand(Configuration)
          Use Shell.getUlimitMemoryCommand(int).
org.apache.hadoop.mapred.ClusterStatus.getUsedMemory()
org.apache.hadoop.streaming.StreamJob.go()
          Use StreamJob.run(String[]) instead.
org.apache.hadoop.fs.FileSystem.isDirectory(Path)
          Use getFileStatus() instead.
org.apache.hadoop.fs.kfs.KosmosFileSystem.isDirectory(Path)
org.apache.hadoop.fs.kfs.KosmosFileSystem.isFile(Path)
org.apache.hadoop.mapred.RunningJob.killTask(String, boolean)
          Applications should rather use RunningJob.killTask(TaskAttemptID, boolean).
org.apache.hadoop.fs.FsShell.limitDecimalTo2(double)
          Consider using StringUtils.limitDecimalTo2(double) instead.
org.apache.hadoop.fs.kfs.KosmosFileSystem.lock(Path, boolean)
org.apache.hadoop.mapred.JobHistory.MapAttempt.logFailed(TaskAttemptID, long, String, String)
          Use JobHistory.MapAttempt.logFailed(TaskAttemptID, long, String, String, String).
org.apache.hadoop.mapred.JobHistory.ReduceAttempt.logFailed(TaskAttemptID, long, String, String)
          Use JobHistory.ReduceAttempt.logFailed(TaskAttemptID, long, String, String, String).
org.apache.hadoop.mapred.JobHistory.ReduceAttempt.logFinished(TaskAttemptID, long, long, long, String)
          Use JobHistory.ReduceAttempt.logFinished(TaskAttemptID, long, long, long, String, String, String, Counters).
org.apache.hadoop.mapred.JobHistory.MapAttempt.logFinished(TaskAttemptID, long, String)
          Use JobHistory.MapAttempt.logFinished(TaskAttemptID, long, String, String, String, Counters).
org.apache.hadoop.mapred.JobHistory.JobInfo.logJobInfo(JobID, long, long, int)
          Use JobHistory.JobInfo.logJobInfo(JobID, long, long) instead.
org.apache.hadoop.mapred.JobHistory.MapAttempt.logKilled(TaskAttemptID, long, String, String)
          Use JobHistory.MapAttempt.logKilled(TaskAttemptID, long, String, String, String).
org.apache.hadoop.mapred.JobHistory.ReduceAttempt.logKilled(TaskAttemptID, long, String, String)
          Use JobHistory.ReduceAttempt.logKilled(TaskAttemptID, long, String, String, String).
org.apache.hadoop.mapred.JobHistory.JobInfo.logStarted(JobID, long, int, int)
          Use JobHistory.JobInfo.logInited(JobID, long, int, int) and JobHistory.JobInfo.logStarted(JobID).
org.apache.hadoop.mapred.JobHistory.MapAttempt.logStarted(TaskAttemptID, long, String)
          Use JobHistory.MapAttempt.logStarted(TaskAttemptID, long, String, int, String).
org.apache.hadoop.mapred.JobHistory.ReduceAttempt.logStarted(TaskAttemptID, long, String)
          Use JobHistory.ReduceAttempt.logStarted(TaskAttemptID, long, String, int, String).
org.apache.hadoop.mapred.JobHistory.MapAttempt.logStarted(TaskAttemptID, long, String, int, String)
org.apache.hadoop.mapred.JobHistory.ReduceAttempt.logStarted(TaskAttemptID, long, String, int, String)
org.apache.hadoop.mapred.JobHistory.JobInfo.logSubmitted(JobID, JobConf, String, long)
          Use JobHistory.JobInfo.logSubmitted(JobID, JobConf, String, long, boolean) instead.
org.apache.hadoop.io.SequenceFile.Reader.next(DataOutputBuffer)
          Call SequenceFile.Reader.nextRaw(DataOutputBuffer, SequenceFile.ValueBytes).
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.next(LongWritable, T)
          Use DBRecordReader.nextKeyValue().
org.apache.hadoop.mapred.JobID.read(DataInput)
org.apache.hadoop.mapred.TaskAttemptID.read(DataInput)
org.apache.hadoop.mapred.TaskID.read(DataInput)
org.apache.hadoop.streaming.UTF8ByteArrayUtils.readLine(LineReader, Text)
          Use StreamKeyValUtil.readLine(LineReader, Text).
org.apache.hadoop.fs.kfs.KosmosFileSystem.release(Path)
org.apache.hadoop.io.SequenceFile.setCompressionType(Configuration, SequenceFile.CompressionType)
          Use one of the many SequenceFile.createWriter methods to specify the SequenceFile.CompressionType while creating the SequenceFile, or SequenceFileOutputFormat.setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType) to specify the SequenceFile.CompressionType for job outputs.
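
A minimal sketch of both recommended routes (the output path and key/value classes are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.SequenceFileOutputFormat;

    public class CompressionTypeExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Route 1: pass the CompressionType directly when creating the file.
        SequenceFile.Writer writer = SequenceFile.createWriter(
            fs, conf, new Path("/tmp/data.seq"), Text.class, IntWritable.class,
            SequenceFile.CompressionType.BLOCK);
        writer.close();

        // Route 2: set the CompressionType on the job for job outputs.
        JobConf job = new JobConf(conf);
        SequenceFileOutputFormat.setOutputCompressionType(job, SequenceFile.CompressionType.BLOCK);
      }
    }
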
org.apache.hadoop.mapreduce.Counter.setDisplayName(String)
org.apache.hadoop.mapred.jobcontrol.Job.setMapredJobID(String)
          Use Job.setAssignedJobID(JobID) instead.
org.apache.hadoop.mapred.JobConf.setMaxPhysicalMemoryForTask(long)
org.apache.hadoop.mapred.JobConf.setMaxVirtualMemoryForTask(long)
          Use JobConf.setMemoryForMapTask(long) and JobConf.setMemoryForReduceTask(long) instead.
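
A minimal sketch of the split setters (the values are illustrative and assumed to be in megabytes):

    import org.apache.hadoop.mapred.JobConf;

    public class TaskMemoryExample {
      public static void main(String[] args) {
        JobConf conf = new JobConf();

        // Old: conf.setMaxVirtualMemoryForTask(...);
        // New: map and reduce task memory are configured separately.
        conf.setMemoryForMapTask(1024L);    // illustrative value
        conf.setMemoryForReduceTask(2048L); // illustrative value
      }
    }
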
org.apache.hadoop.mapred.TaskCompletionEvent.setTaskId(String)
          Use TaskCompletionEvent.setTaskID(TaskAttemptID) instead.
org.apache.hadoop.mapred.JobClient.setTaskOutputFilter(JobClient.TaskStatusFilter)
org.apache.hadoop.streaming.UTF8ByteArrayUtils.splitKeyVal(byte[], int, int, Text, Text, int)
          Use StreamKeyValUtil.splitKeyVal(byte[], int, int, Text, Text, int).
org.apache.hadoop.streaming.UTF8ByteArrayUtils.splitKeyVal(byte[], int, int, Text, Text, int, int)
          Use StreamKeyValUtil.splitKeyVal(byte[], int, int, Text, Text, int, int).
org.apache.hadoop.streaming.UTF8ByteArrayUtils.splitKeyVal(byte[], Text, Text, int)
          Use StreamKeyValUtil.splitKeyVal(byte[], Text, Text, int).
org.apache.hadoop.streaming.UTF8ByteArrayUtils.splitKeyVal(byte[], Text, Text, int, int)
          Use StreamKeyValUtil.splitKeyVal(byte[], Text, Text, int, int).
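
A minimal sketch of the StreamKeyValUtil replacements for the tab-separated key/value helpers (the input record is illustrative):

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.streaming.StreamKeyValUtil;

    public class SplitKeyValExample {
      public static void main(String[] args) throws Exception {
        byte[] line = "some-key\tsome-value".getBytes("UTF-8"); // illustrative record

        Text key = new Text();
        Text value = new Text();

        // Old: UTF8ByteArrayUtils.findTab / UTF8ByteArrayUtils.splitKeyVal
        int tab = StreamKeyValUtil.findTab(line);
        StreamKeyValUtil.splitKeyVal(line, key, value, tab);

        System.out.println(key + " -> " + value);
      }
    }
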
org.apache.hadoop.mapred.pipes.Submitter.submitJob(JobConf)
          Use Submitter.runJob(JobConf).