/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.StringTokenizer;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;

/**
 * This class provides an interface for implementors of a Hadoop file system
 * (analogous to the VFS of Unix). Applications do not access this class;
 * instead they access files across all file systems using {@link FileContext}.
 *
 * Pathnames passed to AbstractFileSystem can be a fully-qualified URI that
 * matches the "this" file system (i.e. same scheme and authority)
 * or a slash-relative name that is assumed to be relative
 * to the root of the "this" file system.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving /* Evolving for a release, to be changed to Stable */
public abstract class AbstractFileSystem {
  static final Log LOG = LogFactory.getLog(AbstractFileSystem.class);

  /** Recording statistics per file system class. */
  private static final Map<URI, Statistics> STATISTICS_TABLE =
      new HashMap<URI, Statistics>();

  /** Cache of constructors for each file system class. */
  private static final Map<Class<?>, Constructor<?>> CONSTRUCTOR_CACHE =
      new ConcurrentHashMap<Class<?>, Constructor<?>>();

  private static final Class<?>[] URI_CONFIG_ARGS =
      new Class[]{URI.class, Configuration.class};

  /** The statistics for this file system. */
  protected Statistics statistics;

  private final URI myUri;

  public Statistics getStatistics() {
    return statistics;
  }

  /**
   * Returns true if the specified string is considered valid in the path part
   * of a URI by this file system. The default implementation enforces the rules
   * of HDFS, but subclasses may override this method to implement specific
   * validation rules for specific file systems.
   *
   * @param src String source filename to check, path part of the URI
   * @return boolean true if the specified string is considered valid
   */
  public boolean isValidName(String src) {
    // Prohibit ".." "." and anything containing ":"
    StringTokenizer tokens = new StringTokenizer(src, Path.SEPARATOR);
    while (tokens.hasMoreTokens()) {
      String element = tokens.nextToken();
      if (element.equals("..") ||
          element.equals(".") ||
          (element.indexOf(":") >= 0)) {
        return false;
      }
    }
    return true;
  }

  /**
   * Create an object for the given class and initialize it from conf.
   * @param theClass class of which an object is created
   * @param conf Configuration
   * @return a new object
   */
  @SuppressWarnings("unchecked")
  static <T> T newInstance(Class<T> theClass,
      URI uri, Configuration conf) {
    T result;
    try {
      Constructor<T> meth = (Constructor<T>) CONSTRUCTOR_CACHE.get(theClass);
      if (meth == null) {
        meth = theClass.getDeclaredConstructor(URI_CONFIG_ARGS);
        meth.setAccessible(true);
        CONSTRUCTOR_CACHE.put(theClass, meth);
      }
      result = meth.newInstance(uri, conf);
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
    return result;
  }

  /**
   * Create a file system instance for the specified uri using the conf. The
   * conf is used to find the class name that implements the file system. The
   * conf is also passed to the file system for its configuration.
   *
   * @param uri URI of the file system
   * @param conf Configuration for the file system
   *
   * @return Returns the file system for the given URI
   *
   * @throws UnsupportedFileSystemException file system for <code>uri</code> is
   *           not found
   */
  public static AbstractFileSystem createFileSystem(URI uri, Configuration conf)
      throws UnsupportedFileSystemException {
    Class<?> clazz = conf.getClass("fs.AbstractFileSystem." +
        uri.getScheme() + ".impl", null);
    if (clazz == null) {
      throw new UnsupportedFileSystemException(
          "No AbstractFileSystem for scheme: " + uri.getScheme());
    }
    return (AbstractFileSystem) newInstance(clazz, uri, conf);
  }
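
  // Example: an implementation for a hypothetical scheme "myfs", provided by a
  // hypothetical class org.example.MyFs, would typically be wired in through
  // configuration and obtained roughly as follows:
  //
  //   Configuration conf = new Configuration();
  //   conf.set("fs.AbstractFileSystem.myfs.impl", "org.example.MyFs");
  //   AbstractFileSystem fs =
  //       AbstractFileSystem.createFileSystem(URI.create("myfs://host:9999/"), conf);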

  /**
   * Get the statistics for a particular file system.
   *
   * @param uri
   *          used as a key to look up STATISTICS_TABLE. Only the scheme and
   *          authority parts of the uri are used.
   * @return a statistics object
   */
  protected static synchronized Statistics getStatistics(URI uri) {
    String scheme = uri.getScheme();
    if (scheme == null) {
      throw new IllegalArgumentException("Scheme not defined in the uri: "
          + uri);
    }
    URI baseUri = getBaseUri(uri);
    Statistics result = STATISTICS_TABLE.get(baseUri);
    if (result == null) {
      result = new Statistics(scheme);
      STATISTICS_TABLE.put(baseUri, result);
    }
    return result;
  }

  private static URI getBaseUri(URI uri) {
    String scheme = uri.getScheme();
    String authority = uri.getAuthority();
    String baseUriString = scheme + "://";
    if (authority != null) {
      baseUriString = baseUriString + authority;
    } else {
      baseUriString = baseUriString + "/";
    }
    return URI.create(baseUriString);
  }

  public static synchronized void clearStatistics() {
    for (Statistics stat : STATISTICS_TABLE.values()) {
      stat.reset();
    }
  }

  /**
   * Prints statistics for all file systems.
   */
  public static synchronized void printStatistics() {
    for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) {
      System.out.println(" FileSystem " + pair.getKey().getScheme() + "://"
          + pair.getKey().getAuthority() + ": " + pair.getValue());
    }
  }

  protected static synchronized Map<URI, Statistics> getAllStatistics() {
    Map<URI, Statistics> statsMap = new HashMap<URI, Statistics>(
        STATISTICS_TABLE.size());
    for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) {
      URI key = pair.getKey();
      Statistics value = pair.getValue();
      Statistics newStatsObj = new Statistics(value);
      statsMap.put(URI.create(key.toString()), newStatsObj);
    }
    return statsMap;
  }

  /**
   * The main factory method for creating a file system. Get a file system for
   * the URI's scheme and authority. The scheme of the <code>uri</code>
   * determines a configuration property name,
   * <tt>fs.AbstractFileSystem.<i>scheme</i>.impl</tt>, whose value names the
   * AbstractFileSystem class.
   *
   * The entire URI and the conf are passed to the AbstractFileSystem factory
   * method.
   *
   * @param uri for the file system to be created.
   * @param conf which is passed to the file system impl.
   *
   * @return file system for the given URI.
   *
   * @throws UnsupportedFileSystemException if the file system for
   *           <code>uri</code> is not supported.
   */
  public static AbstractFileSystem get(final URI uri, final Configuration conf)
      throws UnsupportedFileSystemException {
    return createFileSystem(uri, conf);
  }

  /**
   * Constructor to be called by subclasses.
   *
   * @param uri for this file system.
   * @param supportedScheme the scheme supported by the implementor
   * @param authorityNeeded if true then the URI must have an authority; if
   *          false then the URI must have a null authority.
   * @param defaultPort default port to use if the port is not specified in
   *          the URI.
   *
   * @throws URISyntaxException <code>uri</code> has syntax error
   */
  public AbstractFileSystem(final URI uri, final String supportedScheme,
      final boolean authorityNeeded, final int defaultPort)
      throws URISyntaxException {
    myUri = getUri(uri, supportedScheme, authorityNeeded, defaultPort);
    statistics = getStatistics(uri);
  }
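
  // For illustration, a subclass for a hypothetical scheme "myfs" that
  // requires an authority and uses 9999 as its default port might chain to
  // this constructor along these lines (sketch only):
  //
  //   public MyFs(URI theUri, Configuration conf) throws URISyntaxException {
  //     super(theUri, "myfs", true, 9999);
  //   }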

  /**
   * Check that the Uri's scheme matches.
   * @param uri
   * @param supportedScheme
   */
  public void checkScheme(URI uri, String supportedScheme) {
    String scheme = uri.getScheme();
    if (scheme == null) {
      throw new HadoopIllegalArgumentException("Uri without scheme: " + uri);
    }
    if (!scheme.equals(supportedScheme)) {
      throw new HadoopIllegalArgumentException("Uri scheme " + uri
          + " does not match the scheme " + supportedScheme);
    }
  }

  /**
   * Get the URI for the file system based on the given URI. The path and query
   * parts of the given URI are stripped out and the default file system port
   * is used to form the URI.
   *
   * @param uri FileSystem URI.
   * @param authorityNeeded if true authority cannot be null in the URI. If
   *          false authority must be null.
   * @param defaultPort default port to use if port is not specified in the URI.
   *
   * @return URI of the file system
   *
   * @throws URISyntaxException <code>uri</code> has syntax error
   */
  private URI getUri(URI uri, String supportedScheme,
      boolean authorityNeeded, int defaultPort) throws URISyntaxException {
    checkScheme(uri, supportedScheme);
    // A file system implementation that requires authority must always
    // specify default port
    if (defaultPort < 0 && authorityNeeded) {
      throw new HadoopIllegalArgumentException(
          "FileSystem implementation error - default port " + defaultPort
              + " is not valid");
    }
    String authority = uri.getAuthority();
    if (authority == null) {
      if (authorityNeeded) {
        throw new HadoopIllegalArgumentException("Uri without authority: " + uri);
      } else {
        return new URI(supportedScheme + ":///");
      }
    }
    // authority is non null - AuthorityNeeded may be true or false.
    int port = uri.getPort();
    port = (port == -1 ? defaultPort : port);
    if (port == -1) { // no port supplied and default port is not specified
      return new URI(supportedScheme, authority, "/", null);
    }
    return new URI(supportedScheme + "://" + uri.getHost() + ":" + port);
  }

  /**
   * The default port of this file system.
   *
   * @return default port of this file system's Uri scheme
   *         (a uri with a port of -1 => default port)
   */
  public abstract int getUriDefaultPort();

  /**
   * Returns a URI whose scheme and authority identify this FileSystem.
   *
   * @return the uri of this file system.
   */
  public URI getUri() {
    return myUri;
  }

  /**
   * Check that a Path belongs to this FileSystem.
   *
   * If the path is a fully-qualified URI, then its scheme and authority must
   * match those of this file system. Otherwise the path must be a
   * slash-relative name.
   *
   * @throws InvalidPathException if the path is invalid
   */
  public void checkPath(Path path) {
    URI uri = path.toUri();
    String thatScheme = uri.getScheme();
    String thatAuthority = uri.getAuthority();
    if (thatScheme == null) {
      if (thatAuthority == null) {
        if (path.isUriPathAbsolute()) {
          return;
        }
        throw new InvalidPathException("relative paths not allowed:" +
            path);
      } else {
        throw new InvalidPathException(
            "Path without scheme with non-null authority:" + path);
      }
    }
    String thisScheme = this.getUri().getScheme();
    String thisHost = this.getUri().getHost();
    String thatHost = uri.getHost();

    // Schemes and hosts must match.
    // Allow for null Authority for file:///
    if (!thisScheme.equalsIgnoreCase(thatScheme) ||
        (thisHost != null &&
            !thisHost.equalsIgnoreCase(thatHost)) ||
        (thisHost == null && thatHost != null)) {
      throw new InvalidPathException("Wrong FS: " + path + ", expected: "
          + this.getUri());
    }

    // Ports must match, unless this FS instance is using the default port, in
    // which case the port may be omitted from the given URI
    int thisPort = this.getUri().getPort();
    int thatPort = uri.getPort();
    if (thatPort == -1) { // -1 => defaultPort of Uri scheme
      thatPort = this.getUriDefaultPort();
    }
    if (thisPort != thatPort) {
      throw new InvalidPathException("Wrong FS: " + path + ", expected: "
          + this.getUri());
    }
  }
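
  // For a file system whose URI is, say, the hypothetical "myfs://host:9999",
  // checkPath accepts both the fully-qualified form
  // new Path("myfs://host:9999/data") and the slash-relative form
  // new Path("/data"), while a path such as new Path("other://host/data")
  // is rejected with an InvalidPathException.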

  /**
   * Get the path-part of a pathname. Checks that URI matches this file system
   * and that the path-part is a valid name.
   *
   * @param p path
   *
   * @return path-part of the Path p
   */
  public String getUriPath(final Path p) {
    checkPath(p);
    String s = p.toUri().getPath();
    if (!isValidName(s)) {
      throw new InvalidPathException("Path part " + s + " from URI " + p
          + " is not a valid filename.");
    }
    return s;
  }

  /**
   * Make the path fully qualified to this file system.
   * @param path
   * @return the qualified path
   */
  public Path makeQualified(Path path) {
    checkPath(path);
    return path.makeQualified(this.getUri(), null);
  }

  /**
   * Some file systems like LocalFileSystem have an initial workingDir
   * that is used as the starting workingDir. For other file systems
   * like HDFS there is no built-in notion of an initial workingDir.
   *
   * @return the initial workingDir if the file system has such a notion,
   *         otherwise return null.
   */
  public Path getInitialWorkingDirectory() {
    return null;
  }

  /**
   * Return the current user's home directory in this file system.
   * The default implementation returns "/user/$USER/".
   *
   * @return current user's home directory.
   */
  public Path getHomeDirectory() {
    return new Path("/user/" + System.getProperty("user.name")).makeQualified(
        getUri(), null);
  }

  /**
   * Return a set of server default configuration values.
   *
   * @return server default configuration values
   *
   * @throws IOException an I/O error occurred
   */
  public abstract FsServerDefaults getServerDefaults() throws IOException;

  /**
   * Return the fully-qualified path of path f, resolving the path
   * through any internal symlinks or mount points.
   *
   * @param p path to be resolved
   * @return fully qualified path
   * @throws FileNotFoundException
   * @throws AccessControlException
   * @throws IOException
   * @throws UnresolvedLinkException if a symbolic link on the path cannot be
   *           resolved internally
   */
  public Path resolvePath(final Path p) throws FileNotFoundException,
      UnresolvedLinkException, AccessControlException, IOException {
    checkPath(p);
    return getFileStatus(p).getPath(); // default impl is to return the path
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#create(Path, EnumSet, Options.CreateOpts...)} except
   * that the Path f must be fully qualified and the permission is absolute
   * (i.e. umask has been applied).
   */
  public final FSDataOutputStream create(final Path f,
      final EnumSet<CreateFlag> createFlag, Options.CreateOpts... opts)
      throws AccessControlException, FileAlreadyExistsException,
      FileNotFoundException, ParentNotDirectoryException,
      UnsupportedFileSystemException, UnresolvedLinkException, IOException {
    checkPath(f);
    int bufferSize = -1;
    short replication = -1;
    long blockSize = -1;
    int bytesPerChecksum = -1;
    ChecksumOpt checksumOpt = null;
    FsPermission permission = null;
    Progressable progress = null;
    Boolean createParent = null;

    for (CreateOpts iOpt : opts) {
      if (CreateOpts.BlockSize.class.isInstance(iOpt)) {
        if (blockSize != -1) {
          throw new HadoopIllegalArgumentException(
              "BlockSize option is set multiple times");
        }
        blockSize = ((CreateOpts.BlockSize) iOpt).getValue();
      } else if (CreateOpts.BufferSize.class.isInstance(iOpt)) {
        if (bufferSize != -1) {
          throw new HadoopIllegalArgumentException(
              "BufferSize option is set multiple times");
        }
        bufferSize = ((CreateOpts.BufferSize) iOpt).getValue();
      } else if (CreateOpts.ReplicationFactor.class.isInstance(iOpt)) {
        if (replication != -1) {
          throw new HadoopIllegalArgumentException(
              "ReplicationFactor option is set multiple times");
        }
        replication = ((CreateOpts.ReplicationFactor) iOpt).getValue();
      } else if (CreateOpts.BytesPerChecksum.class.isInstance(iOpt)) {
        if (bytesPerChecksum != -1) {
          throw new HadoopIllegalArgumentException(
              "BytesPerChecksum option is set multiple times");
        }
        bytesPerChecksum = ((CreateOpts.BytesPerChecksum) iOpt).getValue();
      } else if (CreateOpts.ChecksumParam.class.isInstance(iOpt)) {
        if (checksumOpt != null) {
          throw new HadoopIllegalArgumentException(
              "CreateChecksumType option is set multiple times");
        }
        checksumOpt = ((CreateOpts.ChecksumParam) iOpt).getValue();
      } else if (CreateOpts.Perms.class.isInstance(iOpt)) {
        if (permission != null) {
          throw new HadoopIllegalArgumentException(
              "Perms option is set multiple times");
        }
        permission = ((CreateOpts.Perms) iOpt).getValue();
      } else if (CreateOpts.Progress.class.isInstance(iOpt)) {
        if (progress != null) {
          throw new HadoopIllegalArgumentException(
              "Progress option is set multiple times");
        }
        progress = ((CreateOpts.Progress) iOpt).getValue();
      } else if (CreateOpts.CreateParent.class.isInstance(iOpt)) {
        if (createParent != null) {
          throw new HadoopIllegalArgumentException(
              "CreateParent option is set multiple times");
        }
        createParent = ((CreateOpts.CreateParent) iOpt).getValue();
      } else {
        throw new HadoopIllegalArgumentException("Unknown CreateOpts of type " +
            iOpt.getClass().getName());
      }
    }
    if (permission == null) {
      throw new HadoopIllegalArgumentException("no permission supplied");
    }

    FsServerDefaults ssDef = getServerDefaults();
    if (ssDef.getBlockSize() % ssDef.getBytesPerChecksum() != 0) {
      throw new IOException("Internal error: default blockSize is" +
          " not a multiple of default bytesPerChecksum ");
    }

    if (blockSize == -1) {
      blockSize = ssDef.getBlockSize();
    }

    // Create a checksum option honoring user input as much as possible.
    // If bytesPerChecksum is specified, it will override the one set in
    // checksumOpt. Any missing value will be filled in using the default.
    ChecksumOpt defaultOpt = new ChecksumOpt(
        ssDef.getChecksumType(),
        ssDef.getBytesPerChecksum());
    checksumOpt = ChecksumOpt.processChecksumOpt(defaultOpt,
        checksumOpt, bytesPerChecksum);

    if (bufferSize == -1) {
      bufferSize = ssDef.getFileBufferSize();
    }
    if (replication == -1) {
      replication = ssDef.getReplication();
    }
    if (createParent == null) {
      createParent = false;
    }

    if (blockSize % bytesPerChecksum != 0) {
      throw new HadoopIllegalArgumentException(
          "blockSize should be a multiple of checksumsize");
    }

    return this.createInternal(f, createFlag, permission, bufferSize,
        replication, blockSize, progress, checksumOpt, createParent);
  }
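
  // A call to create() typically passes the create flags plus whichever
  // CreateOpts the caller wants to override; everything else falls back to
  // getServerDefaults(). Sketch (the path and permission values are only
  // illustrative; applications normally go through FileContext instead):
  //
  //   FSDataOutputStream out = fs.create(
  //       new Path("/tmp/example"),
  //       EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
  //       CreateOpts.perms(FsPermission.getDefault()),
  //       CreateOpts.createParent());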

  /**
   * The specification of this method matches that of
   * {@link #create(Path, EnumSet, Options.CreateOpts...)} except that the opts
   * have been declared explicitly.
   */
  public abstract FSDataOutputStream createInternal(Path f,
      EnumSet<CreateFlag> flag, FsPermission absolutePermission,
      int bufferSize, short replication, long blockSize, Progressable progress,
      ChecksumOpt checksumOpt, boolean createParent)
      throws AccessControlException, FileAlreadyExistsException,
      FileNotFoundException, ParentNotDirectoryException,
      UnsupportedFileSystemException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#mkdir(Path, FsPermission, boolean)} except that the Path
   * f must be fully qualified and the permission is absolute (i.e.
   * umask has been applied).
   */
  public abstract void mkdir(final Path dir, final FsPermission permission,
      final boolean createParent) throws AccessControlException,
      FileAlreadyExistsException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#delete(Path, boolean)} except that Path f must be for
   * this file system.
   */
  public abstract boolean delete(final Path f, final boolean recursive)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#open(Path)} except that Path f must be for this
   * file system.
   */
  public FSDataInputStream open(final Path f) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException {
    return open(f, getServerDefaults().getFileBufferSize());
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#open(Path, int)} except that Path f must be for this
   * file system.
   */
  public abstract FSDataInputStream open(final Path f, int bufferSize)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#setReplication(Path, short)} except that Path f must be
   * for this file system.
   */
  public abstract boolean setReplication(final Path f,
      final short replication) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
   * f must be for this file system.
   */
  public final void rename(final Path src, final Path dst,
      final Options.Rename... options) throws AccessControlException,
      FileAlreadyExistsException, FileNotFoundException,
      ParentNotDirectoryException, UnresolvedLinkException, IOException {
    boolean overwrite = false;
    if (null != options) {
      for (Rename option : options) {
        if (option == Rename.OVERWRITE) {
          overwrite = true;
        }
      }
    }
    renameInternal(src, dst, overwrite);
  }
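
  // Sketch of the overwrite behaviour: with no options, the rename fails if
  // the destination already exists; passing Options.Rename.OVERWRITE makes the
  // call delegate to renameInternal(src, dst, true), which may replace an
  // existing file or empty directory. For example (illustrative paths):
  //
  //   fs.rename(new Path("/tmp/a"), new Path("/tmp/b"), Options.Rename.OVERWRITE);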

  /**
   * The specification of this method matches that of
   * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
   * f must be for this file system and NO OVERWRITE is performed.
   *
   * File systems that do not have a built-in overwrite need to implement only
   * this method and can take advantage of the default impl of the other
   * {@link #renameInternal(Path, Path, boolean)}
   */
  public abstract void renameInternal(final Path src, final Path dst)
      throws AccessControlException, FileAlreadyExistsException,
      FileNotFoundException, ParentNotDirectoryException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
   * f must be for this file system.
   */
  public void renameInternal(final Path src, final Path dst,
      boolean overwrite) throws AccessControlException,
      FileAlreadyExistsException, FileNotFoundException,
      ParentNotDirectoryException, UnresolvedLinkException, IOException {
    // Default implementation deals with overwrite in a non-atomic way
    final FileStatus srcStatus = getFileLinkStatus(src);

    FileStatus dstStatus;
    try {
      dstStatus = getFileLinkStatus(dst);
    } catch (IOException e) {
      dstStatus = null;
    }
    if (dstStatus != null) {
      if (dst.equals(src)) {
        throw new FileAlreadyExistsException(
            "The source " + src + " and destination " + dst + " are the same");
      }
      if (srcStatus.isSymlink() && dst.equals(srcStatus.getSymlink())) {
        throw new FileAlreadyExistsException(
            "Cannot rename symlink " + src + " to its target " + dst);
      }
      // It's OK to rename a file to a symlink and vice versa
      if (srcStatus.isDirectory() != dstStatus.isDirectory()) {
        throw new IOException("Source " + src + " and destination " + dst
            + " must both be directories");
      }
      if (!overwrite) {
        throw new FileAlreadyExistsException("Rename destination " + dst
            + " already exists.");
      }
      // Delete the destination that is a file or an empty directory
      if (dstStatus.isDirectory()) {
        RemoteIterator<FileStatus> list = listStatusIterator(dst);
        if (list != null && list.hasNext()) {
          throw new IOException(
              "Rename cannot overwrite non empty destination directory " + dst);
        }
      }
      delete(dst, false);
    } else {
      final Path parent = dst.getParent();
      final FileStatus parentStatus = getFileStatus(parent);
      if (parentStatus.isFile()) {
        throw new ParentNotDirectoryException("Rename destination parent "
            + parent + " is a file.");
      }
    }
    renameInternal(src, dst);
  }

  /**
   * Returns true if the file system supports symlinks, false otherwise.
   * @return true if filesystem supports symlinks
   */
  public boolean supportsSymlinks() {
    return false;
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#createSymlink(Path, Path, boolean)};
   */
  public void createSymlink(final Path target, final Path link,
      final boolean createParent) throws IOException, UnresolvedLinkException {
    throw new IOException("File system does not support symlinks");
  }

  /**
   * Partially resolves the path. This is used during symlink resolution in
   * {@link FSLinkResolver}, and differs from the similarly named method
   * {@link FileContext#getLinkTarget(Path)}.
   */
  public Path getLinkTarget(final Path f) throws IOException {
    /* We should never get here. Any file system that threw an
     * UnresolvedLinkException, causing this function to be called,
     * needs to override this method.
     */
    throw new AssertionError();
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#setPermission(Path, FsPermission)} except that Path f
   * must be for this file system.
   */
  public abstract void setPermission(final Path f,
      final FsPermission permission) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#setOwner(Path, String, String)} except that Path f must
   * be for this file system.
   */
  public abstract void setOwner(final Path f, final String username,
      final String groupname) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#setTimes(Path, long, long)} except that Path f must be
   * for this file system.
   */
  public abstract void setTimes(final Path f, final long mtime,
      final long atime) throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFileChecksum(Path)} except that Path f must be for
   * this file system.
   */
  public abstract FileChecksum getFileChecksum(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFileStatus(Path)}
   * except that an UnresolvedLinkException may be thrown if a symlink is
   * encountered in the path.
   */
  public abstract FileStatus getFileStatus(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFileLinkStatus(Path)}
   * except that an UnresolvedLinkException may be thrown if a symlink is
   * encountered in the path leading up to the final path component.
   * If the file system does not support symlinks then the behavior is
   * equivalent to {@link AbstractFileSystem#getFileStatus(Path)}.
   */
  public FileStatus getFileLinkStatus(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnsupportedFileSystemException, IOException {
    return getFileStatus(f);
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFileBlockLocations(Path, long, long)} except that
   * Path f must be for this file system.
   */
  public abstract BlockLocation[] getFileBlockLocations(final Path f,
      final long start, final long len) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFsStatus(Path)} except that Path f must be for this
   * file system.
   */
  public FsStatus getFsStatus(final Path f) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException {
    // default impl gets FsStatus of root
    return getFsStatus();
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFsStatus(Path)}.
   */
  public abstract FsStatus getFsStatus() throws AccessControlException,
      FileNotFoundException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#listStatus(Path)} except that Path f must be for this
   * file system.
   */
  public RemoteIterator<FileStatus> listStatusIterator(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException {
    return new RemoteIterator<FileStatus>() {
      private int i = 0;
      private FileStatus[] statusList = listStatus(f);

      @Override
      public boolean hasNext() {
        return i < statusList.length;
      }

      @Override
      public FileStatus next() {
        if (!hasNext()) {
          throw new NoSuchElementException();
        }
        return statusList[i++];
      }
    };
  }
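
  // Callers iterate the result in the usual RemoteIterator style, for example
  // (assuming dir names an existing directory on this file system):
  //
  //   RemoteIterator<FileStatus> it = fs.listStatusIterator(dir);
  //   while (it.hasNext()) {
  //     FileStatus status = it.next();
  //     // process status
  //   }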

  /**
   * The specification of this method matches that of
   * {@link FileContext#listLocatedStatus(Path)} except that Path f
   * must be for this file system.
   */
  public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException {
    return new RemoteIterator<LocatedFileStatus>() {
      private RemoteIterator<FileStatus> itor = listStatusIterator(f);

      @Override
      public boolean hasNext() throws IOException {
        return itor.hasNext();
      }

      @Override
      public LocatedFileStatus next() throws IOException {
        if (!hasNext()) {
          throw new NoSuchElementException("No more entries in " + f);
        }
        FileStatus result = itor.next();
        BlockLocation[] locs = null;
        if (result.isFile()) {
          locs = getFileBlockLocations(
              result.getPath(), 0, result.getLen());
        }
        return new LocatedFileStatus(result, locs);
      }
    };
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext.Util#listStatus(Path)} except that Path f must be
   * for this file system.
   */
  public abstract FileStatus[] listStatus(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * @return an iterator over the corrupt files under the given path
   *         (may contain duplicates if a file has more than one corrupt block)
   * @throws IOException
   */
  public RemoteIterator<Path> listCorruptFileBlocks(Path path)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getCanonicalName() +
        " does not support" +
        " listCorruptFileBlocks");
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#setVerifyChecksum(boolean, Path)} except that Path f
   * must be for this file system.
   */
  public abstract void setVerifyChecksum(final boolean verifyChecksum)
      throws AccessControlException, IOException;

  /**
   * Get a canonical name for this file system.
   * @return a URI string that uniquely identifies this file system
   */
  public String getCanonicalServiceName() {
    return SecurityUtil.buildDTServiceName(getUri(), getUriDefaultPort());
  }

  /**
   * Get one or more delegation tokens associated with the file system. Normally
   * a file system returns a single delegation token. A file system that manages
   * multiple file systems underneath could return a set of delegation tokens
   * for all the file systems it manages.
   *
   * @param renewer the account name that is allowed to renew the token.
   * @return List of delegation tokens.
   *         If delegation tokens are not supported then return a list of size
   *         zero.
   * @throws IOException
   */
  @InterfaceAudience.LimitedPrivate( { "HDFS", "MapReduce" })
  public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
    return new ArrayList<Token<?>>(0);
  }

  /**
   * Modifies ACL entries of files and directories. This method can add new ACL
   * entries or modify the permissions on existing ACL entries. All existing
   * ACL entries that are not specified in this call are retained without
   * changes. (Modifications are merged into the current ACL.)
   *
   * @param path Path to modify
   * @param aclSpec List<AclEntry> describing modifications
   * @throws IOException if an ACL could not be modified
   */
  public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support modifyAclEntries");
  }

  /**
   * Removes ACL entries from files and directories. Other ACL entries are
   * retained.
   *
   * @param path Path to modify
   * @param aclSpec List<AclEntry> describing entries to remove
   * @throws IOException if an ACL could not be modified
   */
  public void removeAclEntries(Path path, List<AclEntry> aclSpec)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support removeAclEntries");
  }

  /**
   * Removes all default ACL entries from files and directories.
   *
   * @param path Path to modify
   * @throws IOException if an ACL could not be modified
   */
  public void removeDefaultAcl(Path path)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support removeDefaultAcl");
  }

  /**
   * Removes all but the base ACL entries of files and directories. The entries
   * for user, group, and others are retained for compatibility with permission
   * bits.
   *
   * @param path Path to modify
   * @throws IOException if an ACL could not be removed
   */
  public void removeAcl(Path path)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support removeAcl");
  }

  /**
   * Fully replaces ACL of files and directories, discarding all existing
   * entries.
   *
   * @param path Path to modify
   * @param aclSpec List<AclEntry> describing modifications, must include
   *          entries for user, group, and others for compatibility with
   *          permission bits.
   * @throws IOException if an ACL could not be modified
   */
  public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support setAcl");
  }
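
  // An aclSpec is simply a list of AclEntry objects. A subclass that supports
  // ACLs would receive something like the following from a caller (sketch
  // built with the AclEntry.Builder API from org.apache.hadoop.fs.permission;
  // the user name "alice" is only illustrative):
  //
  //   List<AclEntry> aclSpec = java.util.Arrays.asList(
  //       new AclEntry.Builder()
  //           .setScope(AclEntryScope.ACCESS)
  //           .setType(AclEntryType.USER)
  //           .setName("alice")
  //           .setPermission(FsAction.READ_WRITE)
  //           .build());
  //   fs.modifyAclEntries(path, aclSpec);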

  /**
   * Gets the ACLs of files and directories.
   *
   * @param path Path to get
   * @return AclStatus describing the ACL of the file or directory
   * @throws IOException if an ACL could not be read
   */
  public AclStatus getAclStatus(Path path) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getAclStatus");
  }

  @Override // Object
  public int hashCode() {
    return myUri.hashCode();
  }

  @Override // Object
  public boolean equals(Object other) {
    if (other == null || !(other instanceof AbstractFileSystem)) {
      return false;
    }
    return myUri.equals(((AbstractFileSystem) other).myUri);
  }
}