/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;


import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.StringTokenizer;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Progressable;

/**
 * This class provides an interface for implementors of a Hadoop file system
 * (analogous to the VFS of Unix). Applications do not access this class;
 * instead they access files across all file systems using {@link FileContext}.
 * 
 * Pathnames passed to AbstractFileSystem can be fully-qualified URIs that
 * match "this" file system (i.e. same scheme and authority)
 * or slash-relative names that are assumed to be relative
 * to the root of "this" file system.
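 *
 * <p>A minimal, illustrative sketch of how an implementation is wired up and
 * used; the {@code foo} scheme and the {@code FooFs} class are hypothetical:
 * <pre>{@code
 * Configuration conf = new Configuration();
 * // Bind the "foo" scheme to a hypothetical AbstractFileSystem subclass.
 * conf.set("fs.AbstractFileSystem.foo.impl", FooFs.class.getName());
 *
 * // Applications go through FileContext rather than this class directly.
 * FileContext fc = FileContext.getFileContext(URI.create("foo://host/"), conf);
 * FileStatus status = fc.getFileStatus(new Path("/some/file"));
 * }</pre>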
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving /* Evolving for a release, to be changed to Stable */
public abstract class AbstractFileSystem {
  static final Log LOG = LogFactory.getLog(AbstractFileSystem.class);

  /** Recording statistics per file system URI (scheme and authority). */
  private static final Map<URI, Statistics> 
      STATISTICS_TABLE = new HashMap<URI, Statistics>();
  
  /** Cache of constructors for each file system class. */
  private static final Map<Class<?>, Constructor<?>> CONSTRUCTOR_CACHE = 
    new ConcurrentHashMap<Class<?>, Constructor<?>>();
  
  private static final Class<?>[] URI_CONFIG_ARGS = 
    new Class[]{URI.class, Configuration.class};
  
  /** The statistics for this file system. */
  protected Statistics statistics;
  
  private final URI myUri;
  
  public Statistics getStatistics() {
    return statistics;
  }
  
  /**
   * Prohibits names which contain a "..", "." or ":" path element.
   */
  private static boolean isValidName(String src) {
    // Check for ".." "." ":"
    StringTokenizer tokens = new StringTokenizer(src, Path.SEPARATOR);
    while(tokens.hasMoreTokens()) {
      String element = tokens.nextToken();
      if (element.equals("..") ||
          element.equals(".")  ||
          (element.indexOf(":") >= 0)) {
        return false;
      }
    }
    return true;
  }
  
  /** 
   * Create an object for the given class and initialize it from conf.
   *
   * @param theClass class of which an object is created
   * @param uri URI of the file system
   * @param conf Configuration
   * @return a new object
   */
  @SuppressWarnings("unchecked")
  static <T> T newInstance(Class<T> theClass,
    URI uri, Configuration conf) {
    T result;
    try {
      Constructor<T> meth = (Constructor<T>) CONSTRUCTOR_CACHE.get(theClass);
      if (meth == null) {
        meth = theClass.getDeclaredConstructor(URI_CONFIG_ARGS);
        meth.setAccessible(true);
        CONSTRUCTOR_CACHE.put(theClass, meth);
      }
      result = meth.newInstance(uri, conf);
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
    return result;
  }
  
  /**
   * Create a file system instance for the specified uri using the conf. The
   * conf is used to find the class name that implements the file system. The
   * conf is also passed to the file system for its configuration.
   *
   * @param uri URI of the file system
   * @param conf Configuration for the file system
   * 
   * @return Returns the file system for the given URI
   *
   * @throws UnsupportedFileSystemException file system for <code>uri</code> is
   *           not found
   */
  public static AbstractFileSystem createFileSystem(URI uri, Configuration conf)
      throws UnsupportedFileSystemException {
    Class<?> clazz = conf.getClass("fs.AbstractFileSystem." + 
                                uri.getScheme() + ".impl", null);
    if (clazz == null) {
      throw new UnsupportedFileSystemException(
          "No AbstractFileSystem for scheme: " + uri.getScheme());
    }
    return (AbstractFileSystem) newInstance(clazz, uri, conf);
  }

  /**
   * Get the statistics for a particular file system.
   * 
   * @param uri
   *          used as key to lookup STATISTICS_TABLE. Only the scheme and
   *          authority parts of the uri are used.
   * @return a statistics object
   */
  protected static synchronized Statistics getStatistics(URI uri) {
    String scheme = uri.getScheme();
    if (scheme == null) {
      throw new IllegalArgumentException("Scheme not defined in the uri: "
          + uri);
    }
    URI baseUri = getBaseUri(uri);
    Statistics result = STATISTICS_TABLE.get(baseUri);
    if (result == null) {
      result = new Statistics(scheme);
      STATISTICS_TABLE.put(baseUri, result);
    }
    return result;
  }
  
  private static URI getBaseUri(URI uri) {
    String scheme = uri.getScheme();
    String authority = uri.getAuthority();
    String baseUriString = scheme + "://";
    if (authority != null) {
      baseUriString = baseUriString + authority;
    } else {
      baseUriString = baseUriString + "/";
    }
    return URI.create(baseUriString);
  }
  
  public static synchronized void clearStatistics() {
    for(Statistics stat: STATISTICS_TABLE.values()) {
      stat.reset();
    }
  }

  /**
   * Prints statistics for all file systems.
   */
  public static synchronized void printStatistics() {
    for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) {
      System.out.println("  FileSystem " + pair.getKey().getScheme() + "://"
          + pair.getKey().getAuthority() + ": " + pair.getValue());
    }
  }
  
  protected static synchronized Map<URI, Statistics> getAllStatistics() {
    Map<URI, Statistics> statsMap = new HashMap<URI, Statistics>(
        STATISTICS_TABLE.size());
    for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) {
      URI key = pair.getKey();
      Statistics value = pair.getValue();
      Statistics newStatsObj = new Statistics(value);
      statsMap.put(URI.create(key.toString()), newStatsObj);
    }
    return statsMap;
  }

  /**
   * The main factory method for creating a file system. Get a file system for
   * the URI's scheme and authority. The scheme of the <code>uri</code>
   * determines a configuration property name,
   * <tt>fs.AbstractFileSystem.<i>scheme</i>.impl</tt> whose value names the
   * AbstractFileSystem class.
   * 
   * The entire URI and conf are passed to the AbstractFileSystem factory method.
   * 
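   * <p>A brief illustrative sketch (the scheme, host and port below are
   * examples only):
   * <pre>{@code
   * Configuration conf = new Configuration();
   * // Resolves "fs.AbstractFileSystem.hdfs.impl" and instantiates that class.
   * AbstractFileSystem fs =
   *     AbstractFileSystem.get(URI.create("hdfs://namenode:8020/"), conf);
   * }</pre>
   * 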
   * @param uri for the file system to be created.
   * @param conf which is passed to the file system impl.
   * 
   * @return file system for the given URI.
   * 
   * @throws UnsupportedFileSystemException if the file system for
   *           <code>uri</code> is not supported.
   */
  public static AbstractFileSystem get(final URI uri, final Configuration conf)
      throws UnsupportedFileSystemException {
    return createFileSystem(uri, conf);
  }

  /**
   * Constructor to be called by subclasses.
   * 
   * @param uri for this file system.
   * @param supportedScheme the scheme supported by the implementor
   * @param authorityNeeded if true then the URI must have an authority, if
   *          false then the URI must have a null authority.
   * @param defaultPort default port of this file system; -1 indicates that
   *          the file system has no default port.
   *
   * @throws URISyntaxException <code>uri</code> has syntax error
   */
  public AbstractFileSystem(final URI uri, final String supportedScheme,
      final boolean authorityNeeded, final int defaultPort)
      throws URISyntaxException {
    myUri = getUri(uri, supportedScheme, authorityNeeded, defaultPort);
    statistics = getStatistics(uri); 
  }
  
  /**
   * Check that the scheme of the given URI matches the supported scheme.
   *
   * @param uri the URI to check
   * @param supportedScheme the scheme supported by this file system
   */
  public void checkScheme(URI uri, String supportedScheme) {
    String scheme = uri.getScheme();
    if (scheme == null) {
      throw new HadoopIllegalArgumentException("Uri without scheme: " + uri);
    }
    if (!scheme.equals(supportedScheme)) {
      throw new HadoopIllegalArgumentException("Uri scheme " + uri
          + " does not match the scheme " + supportedScheme);
    }
  }

  /**
   * Get the URI for the file system based on the given URI. The path and query
   * parts of the given URI are stripped out and the default file system port
   * is used to form the URI.
   * 
   * @param uri FileSystem URI.
   * @param supportedScheme the scheme supported by this file system.
   * @param authorityNeeded if true authority cannot be null in the URI. If
   *          false authority must be null.
   * @param defaultPort default port to use if port is not specified in the URI.
   * 
   * @return URI of the file system
   * 
   * @throws URISyntaxException <code>uri</code> has syntax error
   */
  private URI getUri(URI uri, String supportedScheme,
      boolean authorityNeeded, int defaultPort) throws URISyntaxException {
    checkScheme(uri, supportedScheme);
    // A file system implementation that requires authority must always
    // specify a default port
    if (defaultPort < 0 && authorityNeeded) {
      throw new HadoopIllegalArgumentException(
          "FileSystem implementation error - default port " + defaultPort
              + " is not valid");
    }
    String authority = uri.getAuthority();
    if (authority == null) {
       if (authorityNeeded) {
         throw new HadoopIllegalArgumentException("Uri without authority: " + uri);
       } else {
         return new URI(supportedScheme + ":///");
       }
    }
    // Authority is non-null; authorityNeeded may be true or false.
    int port = uri.getPort();
    port = (port == -1 ? defaultPort : port);
    if (port == -1) { // no port supplied and default port is not specified
      return new URI(supportedScheme, authority, "/", null);
    }
    return new URI(supportedScheme + "://" + uri.getHost() + ":" + port);
  }
  
  /**
   * The default port of this file system.
   * 
   * @return default port of this file system's Uri scheme;
   *         a uri with a port of -1 maps to the default port.
   */
  public abstract int getUriDefaultPort();

  /**
   * Returns a URI whose scheme and authority identify this FileSystem.
   * 
   * @return the uri of this file system.
   */
  public URI getUri() {
    return myUri;
  }
  
  
  /**
   * Check that a Path belongs to this FileSystem.
   * 
   * If the path is a fully-qualified URI, then its scheme and authority must
   * match those of this file system. Otherwise the path must be a
   * slash-relative name.
   * 
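   * <p>A brief illustrative sketch; {@code fs} denotes an instance of this
   * class and the authority {@code nn1:8020} is an example only:
   * <pre>{@code
   * // For an AbstractFileSystem fs whose URI is hdfs://nn1:8020
   * fs.checkPath(new Path("hdfs://nn1:8020/foo/bar")); // OK: same scheme/authority
   * fs.checkPath(new Path("/foo/bar"));                // OK: slash-relative
   * fs.checkPath(new Path("file:///foo"));             // throws InvalidPathException
   * }</pre>
   * 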
   * @throws InvalidPathException if the path is invalid
   */
  public void checkPath(Path path) {
    URI uri = path.toUri();
    String thatScheme = uri.getScheme();
    String thatAuthority = uri.getAuthority();
    if (thatScheme == null) {
      if (thatAuthority == null) {
        if (path.isUriPathAbsolute()) {
          return;
        }
        throw new InvalidPathException("relative paths not allowed: " + 
            path);
      } else {
        throw new InvalidPathException(
            "Path without scheme with non-null authority: " + path);
      }
    }
    String thisScheme = this.getUri().getScheme();
    String thisHost = this.getUri().getHost();
    String thatHost = uri.getHost();
    
    // Schemes and hosts must match.
    // Allow for null Authority for file:///
    if (!thisScheme.equalsIgnoreCase(thatScheme) ||
       (thisHost != null && 
            !thisHost.equalsIgnoreCase(thatHost)) ||
       (thisHost == null && thatHost != null)) {
      throw new InvalidPathException("Wrong FS: " + path + ", expected: "
          + this.getUri());
    }
    
    // Ports must match, unless this FS instance is using the default port, in
    // which case the port may be omitted from the given URI
    int thisPort = this.getUri().getPort();
    int thatPort = uri.getPort();
    if (thatPort == -1) { // -1 => defaultPort of Uri scheme
      thatPort = this.getUriDefaultPort();
    }
    if (thisPort != thatPort) {
      throw new InvalidPathException("Wrong FS: " + path + ", expected: "
          + this.getUri());
    }
  }
  
  /**
   * Get the path-part of a pathname. Checks that URI matches this file system
   * and that the path-part is a valid name.
   * 
   * @param p path
   * 
   * @return path-part of the Path p
   */
  public String getUriPath(final Path p) {
    checkPath(p);
    String s = p.toUri().getPath();
    if (!isValidName(s)) {
      throw new InvalidPathException("Path part " + s + " from URI " + p
          + " is not a valid filename.");
    }
    return s;
  }
  
  /**
   * Make the path fully qualified to this file system.
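   *
   * <p>A brief illustrative sketch; {@code fs} and the authority
   * {@code nn1:8020} are placeholders only:
   * <pre>{@code
   * // For an AbstractFileSystem fs whose URI is hdfs://nn1:8020
   * Path q = fs.makeQualified(new Path("/foo/bar"));
   * // q is hdfs://nn1:8020/foo/bar
   * }</pre>
   *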
   * @param path the path to qualify
   * @return the qualified path
   */
  public Path makeQualified(Path path) {
    checkPath(path);
    return path.makeQualified(this.getUri(), null);
  }
  
  /**
   * Some file systems like LocalFileSystem have an initial workingDir
   * that is used as the starting workingDir. For other file systems
   * like HDFS there is no built-in notion of an initial workingDir.
   * 
   * @return the initial workingDir if the file system has such a notion;
   *         otherwise return null.
   */
  public Path getInitialWorkingDirectory() {
    return null;
  }
  
  /** 
   * Return the current user's home directory in this file system.
   * The default implementation returns "/user/$USER/".
   * 
   * @return current user's home directory.
   */
  public Path getHomeDirectory() {
    return new Path("/user/"+System.getProperty("user.name")).makeQualified(
                                                                getUri(), null);
  }
  
  /**
   * Return a set of server default configuration values.
   * 
   * @return server default configuration values
   * 
   * @throws IOException an I/O error occurred
   */
  public abstract FsServerDefaults getServerDefaults() throws IOException; 

  /**
   * Return the fully-qualified path of path f, resolving the path
   * through any internal symlinks or mount points.
   *
   * @param p path to be resolved
   * @return fully qualified path 
   * @throws FileNotFoundException
   * @throws AccessControlException
   * @throws IOException
   * @throws UnresolvedLinkException if a symbolic link on the path cannot be
   *           resolved internally
   */
   public Path resolvePath(final Path p) throws FileNotFoundException,
           UnresolvedLinkException, AccessControlException, IOException {
     checkPath(p);
     return getFileStatus(p).getPath(); // default impl is to return the path
   }
  
  /**
   * The specification of this method matches that of
   * {@link FileContext#create(Path, EnumSet, Options.CreateOpts...)} except
   * that the Path f must be fully qualified and the permission is absolute
   * (i.e. umask has been applied).
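   *
   * <p>A brief illustrative sketch of typical {@code Options.CreateOpts}
   * usage; {@code fs} denotes an instance of this class and the path and
   * option values are examples only:
   * <pre>{@code
   * FSDataOutputStream out = fs.create(
   *     new Path("hdfs://nn1:8020/tmp/example"),
   *     EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
   *     CreateOpts.perms(FsPermission.getDefault()),
   *     CreateOpts.blockSize(128 * 1024 * 1024),
   *     CreateOpts.createParent());
   * }</pre>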
   */
  public final FSDataOutputStream create(final Path f,
      final EnumSet<CreateFlag> createFlag, Options.CreateOpts... opts)
      throws AccessControlException, FileAlreadyExistsException,
      FileNotFoundException, ParentNotDirectoryException,
      UnsupportedFileSystemException, UnresolvedLinkException, IOException {
    checkPath(f);
    int bufferSize = -1;
    short replication = -1;
    long blockSize = -1;
    int bytesPerChecksum = -1;
    ChecksumOpt checksumOpt = null;
    FsPermission permission = null;
    Progressable progress = null;
    Boolean createParent = null;

    for (CreateOpts iOpt : opts) {
      if (CreateOpts.BlockSize.class.isInstance(iOpt)) {
        if (blockSize != -1) {
          throw new HadoopIllegalArgumentException(
              "BlockSize option is set multiple times");
        }
        blockSize = ((CreateOpts.BlockSize) iOpt).getValue();
      } else if (CreateOpts.BufferSize.class.isInstance(iOpt)) {
        if (bufferSize != -1) {
          throw new HadoopIllegalArgumentException(
              "BufferSize option is set multiple times");
        }
        bufferSize = ((CreateOpts.BufferSize) iOpt).getValue();
      } else if (CreateOpts.ReplicationFactor.class.isInstance(iOpt)) {
        if (replication != -1) {
          throw new HadoopIllegalArgumentException(
              "ReplicationFactor option is set multiple times");
        }
        replication = ((CreateOpts.ReplicationFactor) iOpt).getValue();
      } else if (CreateOpts.BytesPerChecksum.class.isInstance(iOpt)) {
        if (bytesPerChecksum != -1) {
          throw new HadoopIllegalArgumentException(
              "BytesPerChecksum option is set multiple times");
        }
        bytesPerChecksum = ((CreateOpts.BytesPerChecksum) iOpt).getValue();
      } else if (CreateOpts.ChecksumParam.class.isInstance(iOpt)) {
        if (checksumOpt != null) {
          throw new HadoopIllegalArgumentException(
              "CreateChecksumType option is set multiple times");
        }
        checksumOpt = ((CreateOpts.ChecksumParam) iOpt).getValue();
      } else if (CreateOpts.Perms.class.isInstance(iOpt)) {
        if (permission != null) {
          throw new HadoopIllegalArgumentException(
              "Perms option is set multiple times");
        }
        permission = ((CreateOpts.Perms) iOpt).getValue();
      } else if (CreateOpts.Progress.class.isInstance(iOpt)) {
        if (progress != null) {
          throw new HadoopIllegalArgumentException(
              "Progress option is set multiple times");
        }
        progress = ((CreateOpts.Progress) iOpt).getValue();
      } else if (CreateOpts.CreateParent.class.isInstance(iOpt)) {
        if (createParent != null) {
          throw new HadoopIllegalArgumentException(
              "CreateParent option is set multiple times");
        }
        createParent = ((CreateOpts.CreateParent) iOpt).getValue();
      } else {
        throw new HadoopIllegalArgumentException("Unknown CreateOpts of type " +
            iOpt.getClass().getName());
      }
    }
    if (permission == null) {
      throw new HadoopIllegalArgumentException("no permission supplied");
    }

    FsServerDefaults ssDef = getServerDefaults();
    if (ssDef.getBlockSize() % ssDef.getBytesPerChecksum() != 0) {
      throw new IOException("Internal error: default blockSize is" + 
          " not a multiple of default bytesPerChecksum ");
    }
    
    if (blockSize == -1) {
      blockSize = ssDef.getBlockSize();
    }

    // Create a checksum option honoring user input as much as possible.
    // If bytesPerChecksum is specified, it will override the one set in
    // checksumOpt. Any missing value will be filled in using the default.
    ChecksumOpt defaultOpt = new ChecksumOpt(
        ssDef.getChecksumType(),
        ssDef.getBytesPerChecksum());
    checksumOpt = ChecksumOpt.processChecksumOpt(defaultOpt,
        checksumOpt, bytesPerChecksum);

    if (bufferSize == -1) {
      bufferSize = ssDef.getFileBufferSize();
    }
    if (replication == -1) {
      replication = ssDef.getReplication();
    }
    if (createParent == null) {
      createParent = false;
    }

    if (blockSize % bytesPerChecksum != 0) {
      throw new HadoopIllegalArgumentException(
             "blockSize should be a multiple of checksumsize");
    }

    return this.createInternal(f, createFlag, permission, bufferSize,
      replication, blockSize, progress, checksumOpt, createParent);
  }

  /**
   * The specification of this method matches that of
   * {@link #create(Path, EnumSet, Options.CreateOpts...)} except that the opts
   * have been declared explicitly.
   */
  public abstract FSDataOutputStream createInternal(Path f,
      EnumSet<CreateFlag> flag, FsPermission absolutePermission,
      int bufferSize, short replication, long blockSize, Progressable progress,
      ChecksumOpt checksumOpt, boolean createParent)
      throws AccessControlException, FileAlreadyExistsException,
      FileNotFoundException, ParentNotDirectoryException,
      UnsupportedFileSystemException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#mkdir(Path, FsPermission, boolean)} except that the Path
   * f must be fully qualified and the permission is absolute (i.e. 
   * umask has been applied).
   */
  public abstract void mkdir(final Path dir, final FsPermission permission,
      final boolean createParent) throws AccessControlException,
      FileAlreadyExistsException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#delete(Path, boolean)} except that Path f must be for
   * this file system.
   */
  public abstract boolean delete(final Path f, final boolean recursive)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#open(Path)} except that Path f must be for this
   * file system.
   */
  public FSDataInputStream open(final Path f) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException {
    return open(f, getServerDefaults().getFileBufferSize());
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#open(Path, int)} except that Path f must be for this
   * file system.
   */
  public abstract FSDataInputStream open(final Path f, int bufferSize)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#setReplication(Path, short)} except that Path f must be
   * for this file system.
   */
  public abstract boolean setReplication(final Path f,
      final short replication) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
   * f must be for this file system.
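   *
   * <p>A brief illustrative sketch; {@code fs}, the paths and the overwrite
   * choice are examples only:
   * <pre>{@code
   * // Replace dst if it already exists.
   * fs.rename(new Path("hdfs://nn1:8020/tmp/src"),
   *           new Path("hdfs://nn1:8020/tmp/dst"),
   *           Options.Rename.OVERWRITE);
   * }</pre>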
   */
  public final void rename(final Path src, final Path dst,
      final Options.Rename... options) throws AccessControlException,
      FileAlreadyExistsException, FileNotFoundException,
      ParentNotDirectoryException, UnresolvedLinkException, IOException {
    boolean overwrite = false;
    if (null != options) {
      for (Rename option : options) {
        if (option == Rename.OVERWRITE) {
          overwrite = true;
        }
      }
    }
    renameInternal(src, dst, overwrite);
  }
  
  /**
   * The specification of this method matches that of
   * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
   * f must be for this file system and NO OVERWRITE is performed.
   * 
   * File systems that do not have a built-in overwrite need only implement
   * this method and can take advantage of the default implementation of the
   * other {@link #renameInternal(Path, Path, boolean)}.
   */
  public abstract void renameInternal(final Path src, final Path dst)
      throws AccessControlException, FileAlreadyExistsException,
      FileNotFoundException, ParentNotDirectoryException,
      UnresolvedLinkException, IOException;
  
  /**
   * The specification of this method matches that of
   * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
   * f must be for this file system.
   */
  public void renameInternal(final Path src, final Path dst,
      boolean overwrite) throws AccessControlException,
      FileAlreadyExistsException, FileNotFoundException,
      ParentNotDirectoryException, UnresolvedLinkException, IOException {
    // Default implementation deals with overwrite in a non-atomic way
    final FileStatus srcStatus = getFileLinkStatus(src);

    FileStatus dstStatus;
    try {
      dstStatus = getFileLinkStatus(dst);
    } catch (IOException e) {
      dstStatus = null;
    }
    if (dstStatus != null) {
      if (dst.equals(src)) {
        throw new FileAlreadyExistsException(
            "The source "+src+" and destination "+dst+" are the same");
      }
      if (srcStatus.isSymlink() && dst.equals(srcStatus.getSymlink())) {
        throw new FileAlreadyExistsException(
            "Cannot rename symlink "+src+" to its target "+dst);
      }
      // It's OK to rename a file to a symlink and vice versa
      if (srcStatus.isDirectory() != dstStatus.isDirectory()) {
        throw new IOException("Source " + src + " and destination " + dst
            + " must both be directories");
      }
      if (!overwrite) {
        throw new FileAlreadyExistsException("Rename destination " + dst
            + " already exists.");
      }
      // Delete the destination that is a file or an empty directory
      if (dstStatus.isDirectory()) {
        RemoteIterator<FileStatus> list = listStatusIterator(dst);
        if (list != null && list.hasNext()) {
          throw new IOException(
              "Rename cannot overwrite non empty destination directory " + dst);
        }
      }
      delete(dst, false);
    } else {
      final Path parent = dst.getParent();
      final FileStatus parentStatus = getFileStatus(parent);
      if (parentStatus.isFile()) {
        throw new ParentNotDirectoryException("Rename destination parent "
            + parent + " is a file.");
      }
    }
    renameInternal(src, dst);
  }
  
  /**
   * Returns true if the file system supports symlinks, false otherwise.
   */
  public boolean supportsSymlinks() {
    return false;
  }
  
  /**
   * The specification of this method matches that of
   * {@link FileContext#createSymlink(Path, Path, boolean)}.
   */
  public void createSymlink(final Path target, final Path link,
      final boolean createParent) throws IOException, UnresolvedLinkException {
    throw new IOException("File system does not support symlinks");    
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#getLinkTarget(Path)}.
   */
  public Path getLinkTarget(final Path f) throws IOException {
    /* We should never get here. Any file system that threw an
     * UnresolvedLinkException, causing this function to be called,
     * needs to override this method.
     */
    throw new AssertionError();
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#setPermission(Path, FsPermission)} except that Path f
   * must be for this file system.
   */
  public abstract void setPermission(final Path f,
      final FsPermission permission) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#setOwner(Path, String, String)} except that Path f must
   * be for this file system.
   */
  public abstract void setOwner(final Path f, final String username,
      final String groupname) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#setTimes(Path, long, long)} except that Path f must be
   * for this file system.
   */
  public abstract void setTimes(final Path f, final long mtime,
    final long atime) throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFileChecksum(Path)} except that Path f must be for
   * this file system.
   */
  public abstract FileChecksum getFileChecksum(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;
  
  /**
   * The specification of this method matches that of
   * {@link FileContext#getFileStatus(Path)} 
   * except that an UnresolvedLinkException may be thrown if a symlink is 
   * encountered in the path.
   */
  public abstract FileStatus getFileStatus(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFileLinkStatus(Path)}
   * except that an UnresolvedLinkException may be thrown if a symlink is  
   * encountered in the path leading up to the final path component.
   * If the file system does not support symlinks then the behavior is
   * equivalent to {@link AbstractFileSystem#getFileStatus(Path)}.
   */
  public FileStatus getFileLinkStatus(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnsupportedFileSystemException, IOException {
    return getFileStatus(f);
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFileBlockLocations(Path, long, long)} except that
   * Path f must be for this file system.
   */
  public abstract BlockLocation[] getFileBlockLocations(final Path f,
      final long start, final long len) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFsStatus(Path)} except that Path f must be for this
   * file system.
   */
  public FsStatus getFsStatus(final Path f) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException {
    // default impl gets FsStatus of root
    return getFsStatus();
  }
  
  /**
   * The specification of this method matches that of
   * {@link FileContext#getFsStatus(Path)}.
   */
  public abstract FsStatus getFsStatus() throws AccessControlException,
      FileNotFoundException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#listStatus(Path)} except that Path f must be for this
   * file system.
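   *
   * <p>A brief illustrative sketch of consuming the iterator; {@code fs} and
   * the path are examples only:
   * <pre>{@code
   * RemoteIterator<FileStatus> it = fs.listStatusIterator(new Path("/tmp"));
   * while (it.hasNext()) {
   *   FileStatus st = it.next();
   *   System.out.println(st.getPath());
   * }
   * }</pre>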
   */
  public RemoteIterator<FileStatus> listStatusIterator(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException {
    return new RemoteIterator<FileStatus>() {
      private int i = 0;
      private FileStatus[] statusList = listStatus(f);
      
      @Override
      public boolean hasNext() {
        return i < statusList.length;
      }
      
      @Override
      public FileStatus next() {
        if (!hasNext()) {
          throw new NoSuchElementException();
        }
        return statusList[i++];
      }
    };
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#listLocatedStatus(Path)} except that Path f 
   * must be for this file system.
   */
  public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException {
    return new RemoteIterator<LocatedFileStatus>() {
      private RemoteIterator<FileStatus> itor = listStatusIterator(f);
      
      @Override
      public boolean hasNext() throws IOException {
        return itor.hasNext();
      }
      
      @Override
      public LocatedFileStatus next() throws IOException {
        if (!hasNext()) {
          throw new NoSuchElementException("No more entries in " + f);
        }
        FileStatus result = itor.next();
        BlockLocation[] locs = null;
        if (result.isFile()) {
          locs = getFileBlockLocations(
              result.getPath(), 0, result.getLen());
        }
        return new LocatedFileStatus(result, locs);
      }
    };
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext.Util#listStatus(Path)} except that Path f must be 
   * for this file system.
   */
  public abstract FileStatus[] listStatus(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * @return an iterator over the corrupt files under the given path
   *         (may contain duplicates if a file has more than one corrupt block)
   * @throws IOException
   */
  public RemoteIterator<Path> listCorruptFileBlocks(Path path)
    throws IOException {
    throw new UnsupportedOperationException(getClass().getCanonicalName() +
                                            " does not support" +
                                            " listCorruptFileBlocks");
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#setVerifyChecksum(boolean, Path)} except that Path f
   * must be for this file system.
   */
  public abstract void setVerifyChecksum(final boolean verifyChecksum)
      throws AccessControlException, IOException;
  
  /**
   * Get a canonical name for this file system.
   * @return a URI string that uniquely identifies this file system
   */
  public String getCanonicalServiceName() {
    return SecurityUtil.buildDTServiceName(getUri(), getUriDefaultPort());
  }
  
  /**
   * Get one or more delegation tokens associated with the file system. Normally
   * a file system returns a single delegation token. A file system that manages
   * multiple file systems underneath could return a set of delegation tokens
   * for all the file systems it manages.
   * 
   * @param renewer the account name that is allowed to renew the token.
   * @return list of delegation tokens;
   *   if delegation tokens are not supported, a list of size zero is returned.
   * @throws IOException
   */
  @InterfaceAudience.LimitedPrivate( { "HDFS", "MapReduce" })
  public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
    return new ArrayList<Token<?>>(0);
  }
  
  @Override //Object
  public int hashCode() {
    return myUri.hashCode();
  }
  
  @Override //Object
  public boolean equals(Object other) {
    if (other == null || !(other instanceof AbstractFileSystem)) {
      return false;
    }
    return myUri.equals(((AbstractFileSystem) other).myUri);
  }
}