Deprecated API


Contents
Deprecated Interfaces
org.apache.hadoop.io.Closeable
          use java.io.Closeable 
org.apache.hadoop.record.Index
          Replaced by Avro. 
org.apache.hadoop.record.compiler.generated.RccConstants
          Replaced by Avro. 
org.apache.hadoop.record.RecordInput
          Replaced by Avro. 
org.apache.hadoop.record.RecordOutput
          Replaced by Avro. 
 

Deprecated Classes
org.apache.hadoop.record.BinaryRecordInput
          Replaced by Avro. 
org.apache.hadoop.record.BinaryRecordOutput
          Replaced by Avro. 
org.apache.hadoop.record.Buffer
          Replaced by Avro. 
org.apache.hadoop.record.compiler.CodeBuffer
          Replaced by Avro. 
org.apache.hadoop.record.compiler.Consts
          Replaced by Avro. 
org.apache.hadoop.record.CsvRecordInput
          Replaced by Avro. 
org.apache.hadoop.record.CsvRecordOutput
          Replaced by Avro. 
org.apache.hadoop.record.meta.FieldTypeInfo
          Replaced by Avro. 
org.apache.hadoop.metrics.file.FileContext
           
org.apache.hadoop.record.compiler.JBoolean
          Replaced by Avro. 
org.apache.hadoop.record.compiler.JBuffer
          Replaced by Avro. 
org.apache.hadoop.record.compiler.JByte
          Replaced by Avro. 
org.apache.hadoop.record.compiler.JDouble
          Replaced by Avro. 
org.apache.hadoop.record.compiler.JField
          Replaced by Avro. 
org.apache.hadoop.record.compiler.JFile
          Replaced by Avro. 
org.apache.hadoop.record.compiler.JFloat
          Replaced by Avro. 
org.apache.hadoop.record.compiler.JInt
          Replaced by Avro. 
org.apache.hadoop.record.compiler.JLong
          Replaced by Avro. 
org.apache.hadoop.record.compiler.JMap
          Replaced by Avro. 
org.apache.hadoop.record.compiler.JRecord
          Replaced by Avro. 
org.apache.hadoop.record.compiler.JString
          Replaced by Avro. 
org.apache.hadoop.record.compiler.JType
          Replaced by Avro. 
org.apache.hadoop.record.compiler.JVector
          Replaced by Avro. 
org.apache.hadoop.record.meta.MapTypeID
          Replaced by Avro. 
org.apache.hadoop.record.compiler.generated.Rcc
          Replaced by Avro. 
org.apache.hadoop.record.compiler.ant.RccTask
          Replaced by Avro. 
org.apache.hadoop.record.compiler.generated.RccTokenManager
          Replaced by Avro. 
org.apache.hadoop.record.Record
          Replaced by Avro. 
org.apache.hadoop.record.RecordComparator
          Replaced by Avro. 
org.apache.hadoop.record.meta.RecordTypeInfo
          Replaced by Avro. 
org.apache.hadoop.record.compiler.generated.SimpleCharStream
          Replaced by Avro. 
org.apache.hadoop.record.meta.StructTypeID
          Replaced by Avro. 
org.apache.hadoop.record.compiler.generated.Token
          Replaced by Avro. 
org.apache.hadoop.record.meta.TypeID
          Replaced by Avro. 
org.apache.hadoop.record.Utils
          Replaced by Avro. 
org.apache.hadoop.record.meta.Utils
          Replaced by Avro. 
org.apache.hadoop.record.meta.VectorTypeID
          Replaced by Avro. 
org.apache.hadoop.record.XmlRecordInput
          Replaced by Avro. 
org.apache.hadoop.record.XmlRecordOutput
          Replaced by Avro. 
 

Deprecated Exceptions
org.apache.hadoop.fs.permission.AccessControlException
          Use org.apache.hadoop.security.AccessControlException instead. 
org.apache.hadoop.record.compiler.generated.ParseException
          Replaced by Avro. 
 

Deprecated Errors
org.apache.hadoop.record.compiler.generated.TokenMgrError
          Replaced by Avro. 
 

Deprecated Fields
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SORT_FACTOR_KEY
          Moved to mapreduce, see mapreduce.task.io.sort.factor in mapred-default.xml See https://issues.apache.org/jira/browse/HADOOP-6801 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SORT_MB_KEY
          Moved to mapreduce, see mapreduce.task.io.sort.mb in mapred-default.xml See https://issues.apache.org/jira/browse/HADOOP-6801 
org.apache.hadoop.mapred.JobConf.MAPRED_MAP_TASK_ULIMIT
          Configuration key to set the maximum virtual memory available to the map tasks (in kilo-bytes). This has been deprecated and will no longer have any effect. 
org.apache.hadoop.mapred.JobConf.MAPRED_REDUCE_TASK_ULIMIT
          Configuration key to set the maximum virtual memory available to the reduce tasks (in kilo-bytes). This has been deprecated and will no longer have any effect. 
org.apache.hadoop.mapred.JobConf.MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY
            
org.apache.hadoop.mapred.JobConf.MAPRED_TASK_ENV
          Use JobConf.MAPRED_MAP_TASK_ENV or JobConf.MAPRED_REDUCE_TASK_ENV 
org.apache.hadoop.mapred.JobConf.MAPRED_TASK_JAVA_OPTS
          Use JobConf.MAPRED_MAP_TASK_JAVA_OPTS or JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS 
org.apache.hadoop.mapred.JobConf.MAPRED_TASK_MAXPMEM_PROPERTY
            
org.apache.hadoop.mapred.JobConf.MAPRED_TASK_MAXVMEM_PROPERTY
          Use JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY and JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY 
org.apache.hadoop.mapred.JobConf.MAPRED_TASK_ULIMIT
          Configuration key to set the maximum virtual memory available to the child map and reduce tasks (in kilo-bytes). This has been deprecated and will no longer have any effect. 
org.apache.hadoop.mapred.JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY
            
 

Deprecated Methods
org.apache.hadoop.conf.Configuration.addDeprecation(String, String[])
          use Configuration.addDeprecation(String key, String newKey) instead 
org.apache.hadoop.conf.Configuration.addDeprecation(String, String[], String)
          use Configuration.addDeprecation(String key, String newKey, String customMessage) instead 
org.apache.hadoop.mapred.JobClient.cancelDelegationToken(Token)
          Use Token.cancel(org.apache.hadoop.conf.Configuration) instead 
org.apache.hadoop.mapreduce.Cluster.cancelDelegationToken(Token)
          Use Token.cancel(org.apache.hadoop.conf.Configuration) instead 
org.apache.hadoop.mapred.OutputCommitter.cleanupJob(JobContext)
          Use OutputCommitter.commitJob(JobContext) or OutputCommitter.abortJob(JobContext, int) instead. 
org.apache.hadoop.mapred.OutputCommitter.cleanupJob(JobContext)
          Use OutputCommitter.commitJob(org.apache.hadoop.mapreduce.JobContext) or OutputCommitter.abortJob(org.apache.hadoop.mapreduce.JobContext, org.apache.hadoop.mapreduce.JobStatus.State) instead. 
org.apache.hadoop.mapred.FileOutputCommitter.cleanupJob(JobContext)
           
org.apache.hadoop.mapreduce.OutputCommitter.cleanupJob(JobContext)
          Use OutputCommitter.commitJob(JobContext) and OutputCommitter.abortJob(JobContext, JobStatus.State) instead. 
org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.cleanupJob(JobContext)
           
org.apache.hadoop.io.WritableUtils.cloneInto(Writable, Writable)
          use ReflectionUtils.cloneInto instead. 
org.apache.hadoop.util.ReflectionUtils.cloneWritableInto(Writable, Writable)
           
org.apache.hadoop.mapred.Counters.Counter.contentEquals(Counters.Counter)
            
org.apache.hadoop.fs.FileSystem.createNonRecursive(Path, boolean, int, short, long, Progressable)
          API only for 0.20-append 
org.apache.hadoop.fs.FileSystem.createNonRecursive(Path, FsPermission, boolean, int, short, long, Progressable)
          API only for 0.20-append 
org.apache.hadoop.fs.FileSystem.createNonRecursive(Path, FsPermission, EnumSet, int, short, long, Progressable)
          API only for 0.20-append 
org.apache.hadoop.mapred.lib.CombineFileInputFormat.createPool(JobConf, List)
          Use CombineFileInputFormat.createPool(List). 
org.apache.hadoop.mapred.lib.CombineFileInputFormat.createPool(JobConf, PathFilter...)
          Use CombineFileInputFormat.createPool(PathFilter...). 
org.apache.hadoop.mapreduce.Job.createSymlink()
           
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.createValue()
            
org.apache.hadoop.io.SequenceFile.createWriter(Configuration, FSDataOutputStream, Class, Class, SequenceFile.CompressionType, CompressionCodec)
          Use SequenceFile.createWriter(Configuration, Writer.Option...) instead. 
org.apache.hadoop.io.SequenceFile.createWriter(Configuration, FSDataOutputStream, Class, Class, SequenceFile.CompressionType, CompressionCodec, SequenceFile.Metadata)
          Use SequenceFile.createWriter(Configuration, Writer.Option...) instead. 
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class)
          Use SequenceFile.createWriter(Configuration, Writer.Option...) instead. 
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, int, short, long, boolean, SequenceFile.CompressionType, CompressionCodec, SequenceFile.Metadata)
           
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, int, short, long, SequenceFile.CompressionType, CompressionCodec, Progressable, SequenceFile.Metadata)
          Use SequenceFile.createWriter(Configuration, Writer.Option...) instead. 
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType)
          Use SequenceFile.createWriter(Configuration, Writer.Option...) instead. 
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType, CompressionCodec)
          Use SequenceFile.createWriter(Configuration, Writer.Option...) instead. 
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType, CompressionCodec, Progressable)
          Use SequenceFile.createWriter(Configuration, Writer.Option...) instead. 
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType, CompressionCodec, Progressable, SequenceFile.Metadata)
          Use SequenceFile.createWriter(Configuration, Writer.Option...) instead. 
org.apache.hadoop.io.SequenceFile.createWriter(FileSystem, Configuration, Path, Class, Class, SequenceFile.CompressionType, Progressable)
          Use SequenceFile.createWriter(Configuration, Writer.Option...) instead. 
org.apache.hadoop.fs.FileSystem.delete(Path)
          Use FileSystem.delete(Path, boolean) instead. 
org.apache.hadoop.mapred.JobConf.deleteLocalFiles()
           
org.apache.hadoop.mapred.Counters.findCounter(String, int, String)
          use Counters.findCounter(String, String) instead 
org.apache.hadoop.fs.FileUtil.fullyDelete(FileSystem, Path)
          Use FileSystem.delete(Path, boolean) 
org.apache.hadoop.io.BytesWritable.get()
          Use BytesWritable.getBytes() instead. 
org.apache.hadoop.mapreduce.Cluster.getAllJobs()
          Use Cluster.getAllJobStatuses() instead. 
org.apache.hadoop.fs.FileSystem.getBlockSize(Path)
          Use getFileStatus() instead 
org.apache.hadoop.mapred.Counters.Group.getCounter(int, String)
          use Counters.Group.findCounter(String) instead 
org.apache.hadoop.fs.FileSystem.getDefaultBlockSize()
          use FileSystem.getDefaultBlockSize(Path) instead 
org.apache.hadoop.fs.FileSystem.getDefaultReplication()
          use FileSystem.getDefaultReplication(Path) instead 
org.apache.hadoop.mapreduce.Job.getInstance(Cluster)
          Use Job.getInstance() 
org.apache.hadoop.mapreduce.Job.getInstance(Cluster, Configuration)
          Use Job.getInstance(Configuration) 
org.apache.hadoop.mapred.JobClient.getJob(String)
          Applications should rather use JobClient.getJob(JobID). 
org.apache.hadoop.mapred.JobStatus.getJobId()
          use getJobID instead 
org.apache.hadoop.mapred.RunningJob.getJobID()
          This method is deprecated and will be removed. Applications should rather use RunningJob.getID(). 
org.apache.hadoop.mapred.JobID.getJobIDsPattern(String, Integer)
           
org.apache.hadoop.fs.FileSystem.getLength(Path)
          Use getFileStatus() instead 
org.apache.hadoop.mapreduce.JobContext.getLocalCacheArchives()
          the array returned only includes the items that were downloaded. There is no way to map this to what is returned by JobContext.getCacheArchives(). 
org.apache.hadoop.mapreduce.JobContext.getLocalCacheFiles()
          the array returned only includes the items that were downloaded. There is no way to map this to what is returned by JobContext.getCacheFiles(). 
org.apache.hadoop.mapred.JobClient.getMapTaskReports(String)
          Applications should rather use JobClient.getMapTaskReports(JobID) 
org.apache.hadoop.mapred.JobConf.getMaxPhysicalMemoryForTask()
          this variable is deprecated and no longer in use. 
org.apache.hadoop.mapred.JobConf.getMaxVirtualMemoryForTask()
          Use JobConf.getMemoryForMapTask() and JobConf.getMemoryForReduceTask() 
org.apache.hadoop.fs.FileSystem.getName()
          call #getUri() instead. 
org.apache.hadoop.fs.FileSystem.getNamed(String, Configuration)
          call #get(URI,Configuration) instead. 
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.getPos()
            
org.apache.hadoop.mapred.JobClient.getReduceTaskReports(String)
          Applications should rather use JobClient.getReduceTaskReports(JobID) 
org.apache.hadoop.fs.FileSystem.getReplication(Path)
          Use getFileStatus() instead 
org.apache.hadoop.fs.FileSystem.getServerDefaults()
          use FileSystem.getServerDefaults(Path) instead 
org.apache.hadoop.mapred.JobConf.getSessionId()
           
org.apache.hadoop.io.BytesWritable.getSize()
          Use BytesWritable.getLength() instead. 
org.apache.hadoop.fs.FileSystem.getStatistics()
          use FileSystem.getAllStatistics() instead 
org.apache.hadoop.mapreduce.JobContext.getSymlink()
           
org.apache.hadoop.mapred.TaskAttemptID.getTaskAttemptIDsPattern(String, Integer, Boolean, Integer, Integer)
           
org.apache.hadoop.mapred.TaskAttemptID.getTaskAttemptIDsPattern(String, Integer, TaskType, Integer, Integer)
           
org.apache.hadoop.mapred.TaskCompletionEvent.getTaskId()
          use TaskCompletionEvent.getTaskAttemptId() instead. 
org.apache.hadoop.mapred.TaskID.getTaskIDsPattern(String, Integer, Boolean, Integer)
          Use TaskID.getTaskIDsPattern(String, Integer, TaskType, Integer) 
org.apache.hadoop.mapred.TaskID.getTaskIDsPattern(String, Integer, TaskType, Integer)
           
org.apache.hadoop.mapred.JobClient.getTaskOutputFilter()
           
org.apache.hadoop.fs.FileStatus.isDir()
          Use FileStatus.isFile(), FileStatus.isDirectory(), and FileStatus.isSymlink() instead. 
org.apache.hadoop.mapreduce.TaskID.isMap()
           
org.apache.hadoop.mapreduce.TaskAttemptID.isMap()
           
org.apache.hadoop.mapred.RunningJob.killTask(String, boolean)
          Applications should rather use RunningJob.killTask(TaskAttemptID, boolean) 
org.apache.hadoop.fs.kfs.KosmosFileSystem.lock(Path, boolean)
           
org.apache.hadoop.fs.Path.makeQualified(FileSystem)
           
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.next(LongWritable, T)
          Use DBRecordReader.nextKeyValue() 
org.apache.hadoop.fs.FileSystem.primitiveCreate(Path, FsPermission, EnumSet, int, short, long, Progressable, Options.ChecksumOpt)
           
org.apache.hadoop.fs.FileSystem.primitiveMkdir(Path, FsPermission)
           
org.apache.hadoop.fs.FileSystem.primitiveMkdir(Path, FsPermission, boolean)
           
org.apache.hadoop.mapred.TaskID.read(DataInput)
           
org.apache.hadoop.mapred.TaskAttemptID.read(DataInput)
           
org.apache.hadoop.mapred.JobID.read(DataInput)
           
org.apache.hadoop.fs.kfs.KosmosFileSystem.release(Path)
           
org.apache.hadoop.fs.FileSystem.rename(Path, Path, Options.Rename...)
           
org.apache.hadoop.mapred.JobClient.renewDelegationToken(Token)
          Use Token.renew(org.apache.hadoop.conf.Configuration) instead 
org.apache.hadoop.mapreduce.Cluster.renewDelegationToken(Token)
          Use Token.renew(org.apache.hadoop.conf.Configuration) instead 
org.apache.hadoop.mapred.jobcontrol.Job.setAssignedJobID(JobID)
          setAssignedJobID should not be called. JOBID is set by the framework. 
org.apache.hadoop.mapreduce.Counter.setDisplayName(String)
          (and no-op by default) 
org.apache.hadoop.mapred.JobConf.setMaxPhysicalMemoryForTask(long)
           
org.apache.hadoop.mapred.JobConf.setMaxVirtualMemoryForTask(long)
          Use JobConf.setMemoryForMapTask(long mem) and JobConf.setMemoryForReduceTask(long mem) 
org.apache.hadoop.mapred.JobConf.setSessionId(String)
           
org.apache.hadoop.mapred.TaskCompletionEvent.setTaskId(String)
          use TaskCompletionEvent.setTaskAttemptId(TaskAttemptID) instead. 
org.apache.hadoop.mapred.JobClient.setTaskOutputFilter(JobClient.TaskStatusFilter)
           
org.apache.hadoop.mapred.Counters.size()
          use AbstractCounters.countCounters() instead 
org.apache.hadoop.mapred.pipes.Submitter.submitJob(JobConf)
          Use Submitter.runJob(JobConf) 
org.apache.hadoop.fs.Syncable.sync()
          As of HADOOP 0.21.0, replaced by hflush 
org.apache.hadoop.fs.FSDataOutputStream.sync()
           
 

Deprecated Constructors
org.apache.hadoop.mapred.FileSplit(Path, long, long, JobConf)
            
org.apache.hadoop.fs.FSDataOutputStream(OutputStream)
           
org.apache.hadoop.mapreduce.Job()
           
org.apache.hadoop.mapreduce.Job(Configuration)
           
org.apache.hadoop.mapreduce.Job(Configuration, String)
           
org.apache.hadoop.mapred.TaskAttemptID(String, int, boolean, int, int)
          Use TaskAttemptID.TaskAttemptID(String, int, TaskType, int, int). 
org.apache.hadoop.mapred.TaskID(JobID, boolean, int)
          Use TaskID.TaskID(String, int, TaskType, int) 
org.apache.hadoop.mapred.TaskID(String, int, boolean, int)
          Use TaskID.TaskID(org.apache.hadoop.mapreduce.JobID, TaskType, int) 
 



Copyright © 2012 Apache Software Foundation. All Rights Reserved.