/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.mapreduce.lib.output;

import java.io.IOException;
import java.text.NumberFormat;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.mapred.FileAlreadyExistsException;
import org.apache.hadoop.mapred.InvalidJobConfException;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import org.apache.hadoop.mapreduce.security.TokenCache;

/** A base class for {@link OutputFormat}s that write to {@link FileSystem}s. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class FileOutputFormat<K, V> extends OutputFormat<K, V> {

  /** Construct output file names so that, when an output directory listing is
   * sorted lexicographically, positions correspond to output partitions. */
  private static final NumberFormat NUMBER_FORMAT = NumberFormat.getInstance();
  protected static final String BASE_OUTPUT_NAME = "mapreduce.output.basename";
  protected static final String PART = "part";
  static {
    NUMBER_FORMAT.setMinimumIntegerDigits(5);
    NUMBER_FORMAT.setGroupingUsed(false);
  }
  private FileOutputCommitter committer = null;
  public static final String COMPRESS =
      "mapreduce.output.fileoutputformat.compress";
  public static final String COMPRESS_CODEC =
      "mapreduce.output.fileoutputformat.compress.codec";
  public static final String COMPRESS_TYPE =
      "mapreduce.output.fileoutputformat.compress.type";
  public static final String OUTDIR =
      "mapreduce.output.fileoutputformat.outputdir";

  @Deprecated
  public enum Counter {
    BYTES_WRITTEN
  }

  /**
   * Set whether the output of the job is compressed.
   * @param job the job to modify
   * @param compress should the output of the job be compressed?
   */
  public static void setCompressOutput(Job job, boolean compress) {
    job.getConfiguration().setBoolean(FileOutputFormat.COMPRESS, compress);
  }

  /**
   * Is the job output compressed?
   * @param job the Job to look in
   * @return <code>true</code> if the job output should be compressed,
   *         <code>false</code> otherwise
   */
  public static boolean getCompressOutput(JobContext job) {
    return job.getConfiguration().getBoolean(
        FileOutputFormat.COMPRESS, false);
  }

  /**
   * Set the {@link CompressionCodec} to be used to compress job outputs.
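   *
   * <p>A minimal usage sketch (gzip is assumed to be available; any
   * {@link CompressionCodec} on the cluster classpath works the same way):</p>
   * <pre>{@code
   * Job job = Job.getInstance(new Configuration(), "compressed-output");
   * FileOutputFormat.setCompressOutput(job, true);
   * FileOutputFormat.setOutputCompressorClass(job,
   *     org.apache.hadoop.io.compress.GzipCodec.class);
   * }</pre>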
   *
   * @param job the job to modify
   * @param codecClass the {@link CompressionCodec} to be used to
   *                   compress the job outputs
   */
  public static void setOutputCompressorClass(Job job,
      Class<? extends CompressionCodec> codecClass) {
    setCompressOutput(job, true);
    job.getConfiguration().setClass(FileOutputFormat.COMPRESS_CODEC,
                                    codecClass,
                                    CompressionCodec.class);
  }

  /**
   * Get the {@link CompressionCodec} for compressing the job outputs.
   * @param job the {@link Job} to look in
   * @param defaultValue the {@link CompressionCodec} to return if not set
   * @return the {@link CompressionCodec} to be used to compress the
   *         job outputs
   * @throws IllegalArgumentException if the class was specified, but not found
   */
  public static Class<? extends CompressionCodec> getOutputCompressorClass(
      JobContext job, Class<? extends CompressionCodec> defaultValue) {
    Class<? extends CompressionCodec> codecClass = defaultValue;
    Configuration conf = job.getConfiguration();
    String name = conf.get(FileOutputFormat.COMPRESS_CODEC);
    if (name != null) {
      try {
        codecClass =
            conf.getClassByName(name).asSubclass(CompressionCodec.class);
      } catch (ClassNotFoundException e) {
        throw new IllegalArgumentException("Compression codec " + name +
                                           " was not found.", e);
      }
    }
    return codecClass;
  }
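  /**
   * Get the {@link RecordWriter} for the given task attempt. Concrete
   * subclasses choose the on-disk format; they typically write to the file
   * returned by {@link #getDefaultWorkFile(TaskAttemptContext, String)}.
   */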
  public abstract RecordWriter<K, V> getRecordWriter(TaskAttemptContext job)
      throws IOException, InterruptedException;

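  /**
   * Check that the job's output specification is valid: the output directory
   * must be set and must not already exist. Also obtains delegation tokens
   * for the output directory's file system up front.
   */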
  public void checkOutputSpecs(JobContext job)
      throws FileAlreadyExistsException, IOException {
    // Ensure that the output directory is set and not already there
    Path outDir = getOutputPath(job);
    if (outDir == null) {
      throw new InvalidJobConfException("Output directory not set.");
    }

    // get delegation token for outDir's file system
    TokenCache.obtainTokensForNamenodes(job.getCredentials(),
        new Path[] { outDir }, job.getConfiguration());

    if (outDir.getFileSystem(job.getConfiguration()).exists(outDir)) {
      throw new FileAlreadyExistsException("Output directory " + outDir +
                                           " already exists");
    }
  }

  /**
   * Set the {@link Path} of the output directory for the map-reduce job.
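   *
   * <p>A short usage sketch (the path {@code /user/alice/out} is
   * illustrative):</p>
   * <pre>{@code
   * Job job = Job.getInstance(new Configuration());
   * job.setOutputFormatClass(TextOutputFormat.class);
   * FileOutputFormat.setOutputPath(job, new Path("/user/alice/out"));
   * }</pre>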
   *
   * @param job the job to modify
   * @param outputDir the {@link Path} of the output directory for
   *                  the map-reduce job
   */
  public static void setOutputPath(Job job, Path outputDir) {
    try {
      outputDir = outputDir.getFileSystem(job.getConfiguration()).makeQualified(
          outputDir);
    } catch (IOException e) {
      // Throw the IOException as a RuntimeException to be compatible with MR1
      throw new RuntimeException(e);
    }
    job.getConfiguration().set(FileOutputFormat.OUTDIR, outputDir.toString());
  }

  /**
   * Get the {@link Path} to the output directory for the map-reduce job.
   *
   * @param job the job to look in
   * @return the {@link Path} to the output directory for the map-reduce job.
   * @see FileOutputFormat#getWorkOutputPath(TaskInputOutputContext)
   */
  public static Path getOutputPath(JobContext job) {
    String name = job.getConfiguration().get(FileOutputFormat.OUTDIR);
    return name == null ? null : new Path(name);
  }

  /**
   * Get the {@link Path} to the task's temporary output directory
   * for the map-reduce job.
   *
   * <h4 id="SideEffectFiles">Tasks' Side-Effect Files</h4>
   *
   * <p>Some applications need to create/write-to side-files, which differ from
   * the actual job-outputs.</p>
   *
   * <p>In such cases there could be issues with two instances of the same TIP
   * (running simultaneously, e.g. speculative tasks) trying to open or write to
   * the same file (path) on HDFS. Hence the application-writer will have to
   * pick unique names per task-attempt (e.g. using the attempt id, say
   * <tt>attempt_200709221812_0001_m_000000_0</tt>), not just per TIP.</p>
   *
   * <p>To get around this the Map-Reduce framework helps the application-writer
   * out by maintaining a special
   * <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt>
   * sub-directory for each task-attempt on HDFS where the output of the
   * task-attempt goes. On successful completion of the task-attempt, the files
   * in the <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt> (only)
   * are <i>promoted</i> to <tt>${mapreduce.output.fileoutputformat.outputdir}</tt>. Of course, the
   * framework discards the sub-directory of unsuccessful task-attempts. This
   * is completely transparent to the application.</p>
   *
   * <p>The application-writer can take advantage of this by creating any
   * side-files required in a work directory during execution
   * of the task, i.e. via
   * {@link #getWorkOutputPath(TaskInputOutputContext)}, and
   * the framework will move them out similarly - thus they don't have to pick
   * unique paths per task-attempt.</p>
   *
   * <p>The entire discussion holds true for maps of jobs with
   * reducer=NONE (i.e. 0 reduces) since output of the map, in that case,
   * goes directly to HDFS.</p>
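   *
   * <p>A hedged sketch of creating a side-file from within a task, assuming
   * the job uses a {@link FileOutputCommitter} (the file name
   * {@code side-data} is purely illustrative):</p>
   * <pre>{@code
   * Path workDir = FileOutputFormat.getWorkOutputPath(context);
   * Path sideFile = new Path(workDir, "side-data");
   * FSDataOutputStream out =
   *     sideFile.getFileSystem(context.getConfiguration()).create(sideFile);
   * }</pre>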
   *
   * @return the {@link Path} to the task's temporary output directory
   *         for the map-reduce job.
   */
  public static Path getWorkOutputPath(TaskInputOutputContext<?, ?, ?, ?> context)
      throws IOException, InterruptedException {
    FileOutputCommitter committer = (FileOutputCommitter)
        context.getOutputCommitter();
    return committer.getWorkPath();
  }

  /**
   * Helper function to generate a {@link Path} for a file that is unique for
   * the task within the job output directory.
   *
   * <p>The path can be used to create custom files from within the map and
   * reduce tasks. The path name will be unique for each task. The path parent
   * will be the job output directory.</p>
   *
   * <p>This method uses the {@link #getUniqueFile} method to make the file name
   * unique for the task.</p>
   *
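   * <p>For illustration (the name {@code stats} and extension {@code .json}
   * are hypothetical):</p>
   * <pre>{@code
   * // in a reduce task this yields something like .../stats-r-00000.json
   * Path file = FileOutputFormat.getPathForWorkFile(context, "stats", ".json");
   * }</pre>
   *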
   * @param context the context for the task.
   * @param name the name for the file.
   * @param extension the extension for the file
   * @return a unique path across all tasks of the job.
   */
  public static Path getPathForWorkFile(
      TaskInputOutputContext<?, ?, ?, ?> context, String name, String extension)
      throws IOException, InterruptedException {
    return new Path(getWorkOutputPath(context),
                    getUniqueFile(context, name, extension));
  }

  /**
   * Generate a unique filename, based on the task id, name, and extension.
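   *
   * <p>For example, with name {@code part} and extension {@code .gz}, the
   * first reduce task of a job produces:</p>
   * <pre>{@code
   * String file = FileOutputFormat.getUniqueFile(context, "part", ".gz");
   * // file is "part-r-00000.gz"
   * }</pre>
   *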
   * @param context the task that is calling this
   * @param name the base filename
   * @param extension the filename extension
   * @return a string like $name-[mrsct]-$id$extension
   */
  public static synchronized String getUniqueFile(TaskAttemptContext context,
                                                  String name,
                                                  String extension) {
    TaskID taskId = context.getTaskAttemptID().getTaskID();
    int partition = taskId.getId();
    StringBuilder result = new StringBuilder();
    result.append(name);
    result.append('-');
    result.append(
        TaskID.getRepresentingCharacter(taskId.getTaskType()));
    result.append('-');
    result.append(NUMBER_FORMAT.format(partition));
    result.append(extension);
    return result.toString();
  }

  /**
   * Get the default path and filename for the output format.
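   *
   * <p>A sketch of typical use inside a subclass's {@code getRecordWriter}
   * implementation (the {@code .txt} extension is illustrative):</p>
   * <pre>{@code
   * Path file = getDefaultWorkFile(context, ".txt");
   * FSDataOutputStream out =
   *     file.getFileSystem(context.getConfiguration()).create(file, false);
   * }</pre>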
   *
   * @param context the task context
   * @param extension an extension to add to the filename
   * @return a full path $output/_temporary/$taskid/part-[mr]-$id
   * @throws IOException
   */
  public Path getDefaultWorkFile(TaskAttemptContext context,
                                 String extension) throws IOException {
    FileOutputCommitter committer =
        (FileOutputCommitter) getOutputCommitter(context);
    return new Path(committer.getWorkPath(), getUniqueFile(context,
        getOutputName(context), extension));
  }

  /**
   * Get the base output name for the output file.
   * @param job the job to look in
   * @return the base output name, {@code "part"} unless overridden
   */
  protected static String getOutputName(JobContext job) {
    return job.getConfiguration().get(BASE_OUTPUT_NAME, PART);
  }

  /**
   * Set the base output name for the output file to be created.
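   *
   * <p>For example, from within a subclass (the base name {@code "data"} is
   * arbitrary):</p>
   * <pre>{@code
   * setOutputName(job, "data");
   * // output files are then named data-r-00000, data-r-00001, ...
   * }</pre>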
   *
   * @param job the job to modify
   * @param name the base name for output files, used in place of "part"
   */
  protected static void setOutputName(JobContext job, String name) {
    job.getConfiguration().set(BASE_OUTPUT_NAME, name);
  }
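  /**
   * Get the {@link OutputCommitter} for this output format, creating a
   * {@link FileOutputCommitter} for the configured output path on first use.
   */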
  public synchronized OutputCommitter getOutputCommitter(
      TaskAttemptContext context) throws IOException {
    if (committer == null) {
      Path output = getOutputPath(context);
      committer = new FileOutputCommitter(output, context);
    }
    return committer;
  }
}