/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.StringTokenizer;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;

/**
 * This class provides an interface for implementors of a Hadoop file system
 * (analogous to the VFS of Unix). Applications do not access this class;
 * instead they access files across all file systems using {@link FileContext}.
 *
 * Path names passed to AbstractFileSystem can be fully-qualified URIs that
 * match the "this" file system (i.e. same scheme and authority)
 * or slash-relative names that are assumed to be relative
 * to the root of the "this" file system.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving /* Evolving for a release, to be changed to Stable */
public abstract class AbstractFileSystem {
  static final Log LOG = LogFactory.getLog(AbstractFileSystem.class);

  /** Recording statistics per file system class. */
  private static final Map<URI, Statistics>
      STATISTICS_TABLE = new HashMap<URI, Statistics>();

  /** Cache of constructors for each file system class. */
  private static final Map<Class<?>, Constructor<?>> CONSTRUCTOR_CACHE =
      new ConcurrentHashMap<Class<?>, Constructor<?>>();

  private static final Class<?>[] URI_CONFIG_ARGS =
      new Class[]{URI.class, Configuration.class};

  /** The statistics for this file system. */
  protected Statistics statistics;

  private final URI myUri;

  public Statistics getStatistics() {
    return statistics;
  }

  /**
   * Returns true if the specified string is considered valid in the path part
   * of a URI by this file system. The default implementation enforces the rules
   * of HDFS, but subclasses may override this method to implement specific
   * validation rules for specific file systems.
   *
   * @param src String source filename to check, path part of the URI
   * @return boolean true if the specified string is considered valid
   */
  public boolean isValidName(String src) {
    // Prohibit ".." "." and anything containing ":"
    StringTokenizer tokens = new StringTokenizer(src, Path.SEPARATOR);
    while (tokens.hasMoreTokens()) {
      String element = tokens.nextToken();
      if (element.equals("..") ||
          element.equals(".") ||
          (element.indexOf(":") >= 0)) {
        return false;
      }
    }
    return true;
  }

  /**
   * Create an object for the given class and initialize it from conf.
   *
   * @param theClass class of which an object is created
   * @param uri URI used to initialize the new instance
   * @param conf Configuration
   * @return a new object
   */
  @SuppressWarnings("unchecked")
  static <T> T newInstance(Class<T> theClass,
      URI uri, Configuration conf) {
    T result;
    try {
      Constructor<T> meth = (Constructor<T>) CONSTRUCTOR_CACHE.get(theClass);
      if (meth == null) {
        meth = theClass.getDeclaredConstructor(URI_CONFIG_ARGS);
        meth.setAccessible(true);
        CONSTRUCTOR_CACHE.put(theClass, meth);
      }
      result = meth.newInstance(uri, conf);
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
    return result;
  }

  /**
   * Create a file system instance for the specified uri using the conf. The
   * conf is used to find the class name that implements the file system. The
   * conf is also passed to the file system for its configuration.
   *
   * @param uri URI of the file system
   * @param conf Configuration for the file system
   *
   * @return Returns the file system for the given URI
   *
   * @throws UnsupportedFileSystemException file system for <code>uri</code> is
   *           not found
   */
  public static AbstractFileSystem createFileSystem(URI uri, Configuration conf)
      throws UnsupportedFileSystemException {
    Class<?> clazz = conf.getClass("fs.AbstractFileSystem." +
        uri.getScheme() + ".impl", null);
    if (clazz == null) {
      throw new UnsupportedFileSystemException(
          "No AbstractFileSystem for scheme: " + uri.getScheme());
    }
    return (AbstractFileSystem) newInstance(clazz, uri, conf);
  }
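  // Illustrative sketch (not part of this class): how the factory wiring is
  // typically exercised. The "myfs" scheme and MyAbstractFs class below are
  // hypothetical placeholders for a concrete AbstractFileSystem subclass.
  //
  //   Configuration conf = new Configuration();
  //   conf.setClass("fs.AbstractFileSystem.myfs.impl",
  //       MyAbstractFs.class, AbstractFileSystem.class);
  //   AbstractFileSystem fs = AbstractFileSystem.createFileSystem(
  //       URI.create("myfs://host:9000/"), conf);
  //
  // createFileSystem resolves the class from the configuration key and
  // instantiates it reflectively through the (URI, Configuration) constructor
  // cached in CONSTRUCTOR_CACHE.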
  /**
   * Get the statistics for a particular file system.
   *
   * @param uri
   *          used as a key to look up STATISTICS_TABLE. Only the scheme and
   *          authority parts of the URI are used.
   * @return a statistics object
   */
  protected static synchronized Statistics getStatistics(URI uri) {
    String scheme = uri.getScheme();
    if (scheme == null) {
      throw new IllegalArgumentException("Scheme not defined in the uri: "
          + uri);
    }
    URI baseUri = getBaseUri(uri);
    Statistics result = STATISTICS_TABLE.get(baseUri);
    if (result == null) {
      result = new Statistics(scheme);
      STATISTICS_TABLE.put(baseUri, result);
    }
    return result;
  }

  private static URI getBaseUri(URI uri) {
    String scheme = uri.getScheme();
    String authority = uri.getAuthority();
    String baseUriString = scheme + "://";
    if (authority != null) {
      baseUriString = baseUriString + authority;
    } else {
      baseUriString = baseUriString + "/";
    }
    return URI.create(baseUriString);
  }

  public static synchronized void clearStatistics() {
    for (Statistics stat : STATISTICS_TABLE.values()) {
      stat.reset();
    }
  }

  /**
   * Prints statistics for all file systems.
   */
  public static synchronized void printStatistics() {
    for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) {
      System.out.println(" FileSystem " + pair.getKey().getScheme() + "://"
          + pair.getKey().getAuthority() + ": " + pair.getValue());
    }
  }

  protected static synchronized Map<URI, Statistics> getAllStatistics() {
    Map<URI, Statistics> statsMap = new HashMap<URI, Statistics>(
        STATISTICS_TABLE.size());
    for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) {
      URI key = pair.getKey();
      Statistics value = pair.getValue();
      Statistics newStatsObj = new Statistics(value);
      statsMap.put(URI.create(key.toString()), newStatsObj);
    }
    return statsMap;
  }

  /**
   * The main factory method for creating a file system. Get a file system for
   * the URI's scheme and authority. The scheme of the <code>uri</code>
   * determines a configuration property name,
   * <tt>fs.AbstractFileSystem.<i>scheme</i>.impl</tt> whose value names the
   * AbstractFileSystem class.
   *
   * The entire URI and conf are passed to the AbstractFileSystem factory method.
   *
   * @param uri for the file system to be created.
   * @param conf which is passed to the file system impl.
   *
   * @return file system for the given URI.
   *
   * @throws UnsupportedFileSystemException if the file system for
   *           <code>uri</code> is not supported.
   */
  public static AbstractFileSystem get(final URI uri, final Configuration conf)
      throws UnsupportedFileSystemException {
    return createFileSystem(uri, conf);
  }
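  // Illustrative sketch (not part of this class): statistics are keyed by
  // scheme and authority, so every instance bound to, say,
  // "hdfs://namenode:8020" shares one Statistics object. A hypothetical
  // inspection from a subclass or same-package caller could look like:
  //
  //   for (Map.Entry<URI, FileSystem.Statistics> e :
  //       AbstractFileSystem.getAllStatistics().entrySet()) {
  //     System.out.println(e.getKey() + " read "
  //         + e.getValue().getBytesRead() + " bytes");
  //   }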
  /**
   * Constructor to be called by subclasses.
   *
   * @param uri for this file system.
   * @param supportedScheme the scheme supported by the implementor
   * @param authorityNeeded if true then the URI must have an authority, if
   *          false then the URI must have a null authority.
   *
   * @throws URISyntaxException <code>uri</code> has syntax error
   */
  public AbstractFileSystem(final URI uri, final String supportedScheme,
      final boolean authorityNeeded, final int defaultPort)
      throws URISyntaxException {
    myUri = getUri(uri, supportedScheme, authorityNeeded, defaultPort);
    statistics = getStatistics(uri);
  }

  /**
   * Check that the URI's scheme matches the scheme supported by this file
   * system.
   *
   * @param uri the URI to check
   * @param supportedScheme the scheme supported by this file system
   */
  public void checkScheme(URI uri, String supportedScheme) {
    String scheme = uri.getScheme();
    if (scheme == null) {
      throw new HadoopIllegalArgumentException("Uri without scheme: " + uri);
    }
    if (!scheme.equals(supportedScheme)) {
      throw new HadoopIllegalArgumentException("Uri scheme " + uri
          + " does not match the scheme " + supportedScheme);
    }
  }

  /**
   * Get the URI for the file system based on the given URI. The path and query
   * parts of the given URI are stripped out and the default file system port
   * is used to form the URI.
   *
   * @param uri FileSystem URI.
   * @param supportedScheme the scheme supported by this file system
   * @param authorityNeeded if true, the authority cannot be null in the URI.
   *          If false, the authority must be null.
   * @param defaultPort default port to use if port is not specified in the URI.
   *
   * @return URI of the file system
   *
   * @throws URISyntaxException <code>uri</code> has syntax error
   */
  private URI getUri(URI uri, String supportedScheme,
      boolean authorityNeeded, int defaultPort) throws URISyntaxException {
    checkScheme(uri, supportedScheme);
    // A file system implementation that requires authority must always
    // specify default port
    if (defaultPort < 0 && authorityNeeded) {
      throw new HadoopIllegalArgumentException(
          "FileSystem implementation error - default port " + defaultPort
              + " is not valid");
    }
    String authority = uri.getAuthority();
    if (authority == null) {
      if (authorityNeeded) {
        throw new HadoopIllegalArgumentException("Uri without authority: " + uri);
      } else {
        return new URI(supportedScheme + ":///");
      }
    }
    // authority is non null - AuthorityNeeded may be true or false.
    int port = uri.getPort();
    port = (port == -1 ? defaultPort : port);
    if (port == -1) { // no port supplied and default port is not specified
      return new URI(supportedScheme, authority, "/", null);
    }
    return new URI(supportedScheme + "://" + uri.getHost() + ":" + port);
  }

  /**
   * The default port of this file system.
   *
   * @return default port of this file system's URI scheme;
   *         a URI with a port of -1 => default port
   */
  public abstract int getUriDefaultPort();

  /**
   * Returns a URI whose scheme and authority identify this FileSystem.
   *
   * @return the uri of this file system.
   */
  public URI getUri() {
    return myUri;
  }
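  // Illustrative sketch (not part of this class): how the constructor
  // normalizes the URI passed by a subclass. The "myfs" scheme, host, port
  // and path below are hypothetical.
  //
  //   new URI("myfs://host/some/path?x=1") with defaultPort 9000
  //       -> myUri becomes "myfs://host:9000" (path and query stripped,
  //          default port filled in)
  //   new URI("myfs:///") with authorityNeeded == false
  //       -> myUri becomes "myfs:///"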
  /**
   * Check that a Path belongs to this FileSystem.
   *
   * If the path is a fully-qualified URI, then its scheme and authority must
   * match those of this file system. Otherwise the path must be a
   * slash-relative name.
   *
   * @throws InvalidPathException if the path is invalid
   */
  public void checkPath(Path path) {
    URI uri = path.toUri();
    String thatScheme = uri.getScheme();
    String thatAuthority = uri.getAuthority();
    if (thatScheme == null) {
      if (thatAuthority == null) {
        if (path.isUriPathAbsolute()) {
          return;
        }
        throw new InvalidPathException("relative paths not allowed:" +
            path);
      } else {
        throw new InvalidPathException(
            "Path without scheme with non-null authority:" + path);
      }
    }
    String thisScheme = this.getUri().getScheme();
    String thisHost = this.getUri().getHost();
    String thatHost = uri.getHost();

    // Schemes and hosts must match.
    // Allow for null Authority for file:///
    if (!thisScheme.equalsIgnoreCase(thatScheme) ||
        (thisHost != null &&
            !thisHost.equalsIgnoreCase(thatHost)) ||
        (thisHost == null && thatHost != null)) {
      throw new InvalidPathException("Wrong FS: " + path + ", expected: "
          + this.getUri());
    }

    // Ports must match, unless this FS instance is using the default port, in
    // which case the port may be omitted from the given URI
    int thisPort = this.getUri().getPort();
    int thatPort = uri.getPort();
    if (thatPort == -1) { // -1 => defaultPort of Uri scheme
      thatPort = this.getUriDefaultPort();
    }
    if (thisPort != thatPort) {
      throw new InvalidPathException("Wrong FS: " + path + ", expected: "
          + this.getUri());
    }
  }

  /**
   * Get the path-part of a pathname. Checks that URI matches this file system
   * and that the path-part is a valid name.
   *
   * @param p path
   *
   * @return path-part of the Path p
   */
  public String getUriPath(final Path p) {
    checkPath(p);
    String s = p.toUri().getPath();
    if (!isValidName(s)) {
      throw new InvalidPathException("Path part " + s + " from URI " + p
          + " is not a valid filename.");
    }
    return s;
  }

  /**
   * Make the path fully qualified to this file system.
   *
   * @param path the path to qualify
   * @return the qualified path
   */
  public Path makeQualified(Path path) {
    checkPath(path);
    return path.makeQualified(this.getUri(), null);
  }

  /**
   * Some file systems like LocalFileSystem have an initial workingDir
   * that is used as the starting workingDir. For other file systems
   * like HDFS there is no built-in notion of an initial workingDir.
   *
   * @return the initial workingDir if the file system has such a notion,
   *         otherwise return null.
   */
  public Path getInitialWorkingDirectory() {
    return null;
  }

  /**
   * Return the current user's home directory in this file system.
   * The default implementation returns "/user/$USER/".
   *
   * @return current user's home directory.
   */
  public Path getHomeDirectory() {
    return new Path("/user/" + System.getProperty("user.name")).makeQualified(
        getUri(), null);
  }
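  // Illustrative sketch (not part of this class): checkPath accepts both
  // slash-relative names and fully-qualified URIs on this file system. The
  // "myfs" scheme, host and paths below are hypothetical; fs denotes an
  // AbstractFileSystem instance whose getUri() is "myfs://host:9000".
  //
  //   fs.checkPath(new Path("/user/alice"));                 // OK, slash-relative
  //   fs.checkPath(new Path("myfs://host:9000/user/alice")); // OK, same FS
  //   fs.checkPath(new Path("otherfs://host/user/alice"));   // InvalidPathException
  //   fs.makeQualified(new Path("/user/alice"));
  //       // -> "myfs://host:9000/user/alice"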
  /**
   * Return a set of server default configuration values.
   *
   * @return server default configuration values
   *
   * @throws IOException an I/O error occurred
   */
  public abstract FsServerDefaults getServerDefaults() throws IOException;

  /**
   * Return the fully-qualified path of path f, resolving the path
   * through any internal symlinks or mount points.
   *
   * @param p path to be resolved
   * @return fully qualified path
   * @throws FileNotFoundException
   * @throws AccessControlException
   * @throws UnresolvedLinkException if a symbolic link on the path cannot be
   *           resolved internally
   * @throws IOException
   */
  public Path resolvePath(final Path p) throws FileNotFoundException,
      UnresolvedLinkException, AccessControlException, IOException {
    checkPath(p);
    return getFileStatus(p).getPath(); // default impl is to return the path
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#create(Path, EnumSet, Options.CreateOpts...)} except
   * that the Path f must be fully qualified and the permission is absolute
   * (i.e. umask has been applied).
   */
  public final FSDataOutputStream create(final Path f,
      final EnumSet<CreateFlag> createFlag, Options.CreateOpts... opts)
      throws AccessControlException, FileAlreadyExistsException,
      FileNotFoundException, ParentNotDirectoryException,
      UnsupportedFileSystemException, UnresolvedLinkException, IOException {
    checkPath(f);
    int bufferSize = -1;
    short replication = -1;
    long blockSize = -1;
    int bytesPerChecksum = -1;
    ChecksumOpt checksumOpt = null;
    FsPermission permission = null;
    Progressable progress = null;
    Boolean createParent = null;

    for (CreateOpts iOpt : opts) {
      if (CreateOpts.BlockSize.class.isInstance(iOpt)) {
        if (blockSize != -1) {
          throw new HadoopIllegalArgumentException(
              "BlockSize option is set multiple times");
        }
        blockSize = ((CreateOpts.BlockSize) iOpt).getValue();
      } else if (CreateOpts.BufferSize.class.isInstance(iOpt)) {
        if (bufferSize != -1) {
          throw new HadoopIllegalArgumentException(
              "BufferSize option is set multiple times");
        }
        bufferSize = ((CreateOpts.BufferSize) iOpt).getValue();
      } else if (CreateOpts.ReplicationFactor.class.isInstance(iOpt)) {
        if (replication != -1) {
          throw new HadoopIllegalArgumentException(
              "ReplicationFactor option is set multiple times");
        }
        replication = ((CreateOpts.ReplicationFactor) iOpt).getValue();
      } else if (CreateOpts.BytesPerChecksum.class.isInstance(iOpt)) {
        if (bytesPerChecksum != -1) {
          throw new HadoopIllegalArgumentException(
              "BytesPerChecksum option is set multiple times");
        }
        bytesPerChecksum = ((CreateOpts.BytesPerChecksum) iOpt).getValue();
      } else if (CreateOpts.ChecksumParam.class.isInstance(iOpt)) {
        if (checksumOpt != null) {
          throw new HadoopIllegalArgumentException(
              "CreateChecksumType option is set multiple times");
        }
        checksumOpt = ((CreateOpts.ChecksumParam) iOpt).getValue();
      } else if (CreateOpts.Perms.class.isInstance(iOpt)) {
        if (permission != null) {
          throw new HadoopIllegalArgumentException(
              "Perms option is set multiple times");
        }
        permission = ((CreateOpts.Perms) iOpt).getValue();
      } else if (CreateOpts.Progress.class.isInstance(iOpt)) {
        if (progress != null) {
          throw new HadoopIllegalArgumentException(
              "Progress option is set multiple times");
        }
        progress = ((CreateOpts.Progress) iOpt).getValue();
      } else if (CreateOpts.CreateParent.class.isInstance(iOpt)) {
        if (createParent != null) {
          throw new HadoopIllegalArgumentException(
              "CreateParent option is set multiple times");
        }
        createParent = ((CreateOpts.CreateParent) iOpt).getValue();
      } else {
        throw new HadoopIllegalArgumentException("Unknown CreateOpts of type " +
            iOpt.getClass().getName());
      }
    }
    if (permission == null) {
      throw new HadoopIllegalArgumentException("no permission supplied");
    }

    FsServerDefaults ssDef = getServerDefaults();
    if (ssDef.getBlockSize() % ssDef.getBytesPerChecksum() != 0) {
      throw new IOException("Internal error: default blockSize is" +
          " not a multiple of default bytesPerChecksum ");
    }

    if (blockSize == -1) {
      blockSize = ssDef.getBlockSize();
    }

    // Create a checksum option honoring user input as much as possible.
    // If bytesPerChecksum is specified, it will override the one set in
    // checksumOpt. Any missing value will be filled in using the default.
    ChecksumOpt defaultOpt = new ChecksumOpt(
        ssDef.getChecksumType(),
        ssDef.getBytesPerChecksum());
    checksumOpt = ChecksumOpt.processChecksumOpt(defaultOpt,
        checksumOpt, bytesPerChecksum);

    if (bufferSize == -1) {
      bufferSize = ssDef.getFileBufferSize();
    }
    if (replication == -1) {
      replication = ssDef.getReplication();
    }
    if (createParent == null) {
      createParent = false;
    }

    if (blockSize % bytesPerChecksum != 0) {
      throw new HadoopIllegalArgumentException(
          "blockSize should be a multiple of checksum size");
    }

    return this.createInternal(f, createFlag, permission, bufferSize,
        replication, blockSize, progress, checksumOpt, createParent);
  }

  /**
   * The specification of this method matches that of
   * {@link #create(Path, EnumSet, Options.CreateOpts...)} except that the opts
   * have been declared explicitly.
   */
  public abstract FSDataOutputStream createInternal(Path f,
      EnumSet<CreateFlag> flag, FsPermission absolutePermission,
      int bufferSize, short replication, long blockSize, Progressable progress,
      ChecksumOpt checksumOpt, boolean createParent)
      throws AccessControlException, FileAlreadyExistsException,
      FileNotFoundException, ParentNotDirectoryException,
      UnsupportedFileSystemException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#mkdir(Path, FsPermission, boolean)} except that the Path
   * f must be fully qualified and the permission is absolute (i.e.
   * umask has been applied).
   */
  public abstract void mkdir(final Path dir, final FsPermission permission,
      final boolean createParent) throws AccessControlException,
      FileAlreadyExistsException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#delete(Path, boolean)} except that Path f must be for
   * this file system.
   */
  public abstract boolean delete(final Path f, final boolean recursive)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;
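  // Illustrative sketch (not part of this class): callers bundle optional
  // settings as CreateOpts varargs; anything not specified falls back to
  // getServerDefaults(). The path and values below are hypothetical; fs
  // denotes an AbstractFileSystem instance.
  //
  //   FSDataOutputStream out = fs.create(
  //       new Path("/user/alice/part-0000"),
  //       EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
  //       CreateOpts.perms(FsPermission.getFileDefault()),
  //       CreateOpts.blockSize(128L * 1024 * 1024),
  //       CreateOpts.createParent());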
  /**
   * The specification of this method matches that of
   * {@link FileContext#open(Path)} except that Path f must be for this
   * file system.
   */
  public FSDataInputStream open(final Path f) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException {
    return open(f, getServerDefaults().getFileBufferSize());
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#open(Path, int)} except that Path f must be for this
   * file system.
   */
  public abstract FSDataInputStream open(final Path f, int bufferSize)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#setReplication(Path, short)} except that Path f must be
   * for this file system.
   */
  public abstract boolean setReplication(final Path f,
      final short replication) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
   * f must be for this file system.
   */
  public final void rename(final Path src, final Path dst,
      final Options.Rename... options) throws AccessControlException,
      FileAlreadyExistsException, FileNotFoundException,
      ParentNotDirectoryException, UnresolvedLinkException, IOException {
    boolean overwrite = false;
    if (null != options) {
      for (Rename option : options) {
        if (option == Rename.OVERWRITE) {
          overwrite = true;
        }
      }
    }
    renameInternal(src, dst, overwrite);
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
   * f must be for this file system and NO OVERWRITE is performed.
   *
   * File systems that do not have a built-in overwrite need to implement only
   * this method and can take advantage of the default impl of the other
   * {@link #renameInternal(Path, Path, boolean)}.
   */
  public abstract void renameInternal(final Path src, final Path dst)
      throws AccessControlException, FileAlreadyExistsException,
      FileNotFoundException, ParentNotDirectoryException,
      UnresolvedLinkException, IOException;
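  // Illustrative sketch (not part of this class): rename only overwrites an
  // existing destination when Rename.OVERWRITE is passed. Paths below are
  // hypothetical; fs denotes an AbstractFileSystem instance.
  //
  //   fs.rename(new Path("/tmp/staging"), new Path("/data/final"));
  //       // fails with FileAlreadyExistsException if /data/final exists
  //   fs.rename(new Path("/tmp/staging"), new Path("/data/final"),
  //       Options.Rename.OVERWRITE);
  //       // replaces an existing file or empty directory at /data/final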
  /**
   * The specification of this method matches that of
   * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
   * f must be for this file system.
   */
  public void renameInternal(final Path src, final Path dst,
      boolean overwrite) throws AccessControlException,
      FileAlreadyExistsException, FileNotFoundException,
      ParentNotDirectoryException, UnresolvedLinkException, IOException {
    // Default implementation deals with overwrite in a non-atomic way
    final FileStatus srcStatus = getFileLinkStatus(src);

    FileStatus dstStatus;
    try {
      dstStatus = getFileLinkStatus(dst);
    } catch (IOException e) {
      dstStatus = null;
    }
    if (dstStatus != null) {
      if (dst.equals(src)) {
        throw new FileAlreadyExistsException(
            "The source " + src + " and destination " + dst + " are the same");
      }
      if (srcStatus.isSymlink() && dst.equals(srcStatus.getSymlink())) {
        throw new FileAlreadyExistsException(
            "Cannot rename symlink " + src + " to its target " + dst);
      }
      // It's OK to rename a file to a symlink and vice versa
      if (srcStatus.isDirectory() != dstStatus.isDirectory()) {
        throw new IOException("Source " + src + " and destination " + dst
            + " must both be directories");
      }
      if (!overwrite) {
        throw new FileAlreadyExistsException("Rename destination " + dst
            + " already exists.");
      }
      // Delete the destination that is a file or an empty directory
      if (dstStatus.isDirectory()) {
        RemoteIterator<FileStatus> list = listStatusIterator(dst);
        if (list != null && list.hasNext()) {
          throw new IOException(
              "Rename cannot overwrite non-empty destination directory " + dst);
        }
      }
      delete(dst, false);
    } else {
      final Path parent = dst.getParent();
      final FileStatus parentStatus = getFileStatus(parent);
      if (parentStatus.isFile()) {
        throw new ParentNotDirectoryException("Rename destination parent "
            + parent + " is a file.");
      }
    }
    renameInternal(src, dst);
  }

  /**
   * Returns true if the file system supports symlinks, false otherwise.
   *
   * @return true if filesystem supports symlinks
   */
  public boolean supportsSymlinks() {
    return false;
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#createSymlink(Path, Path, boolean)};
   */
  public void createSymlink(final Path target, final Path link,
      final boolean createParent) throws IOException, UnresolvedLinkException {
    throw new IOException("File system does not support symlinks");
  }

  /**
   * Partially resolves the path. This is used during symlink resolution in
   * {@link FSLinkResolver}, and differs from the similarly named method
   * {@link FileContext#getLinkTarget(Path)}.
   *
   * @throws IOException subclass implementations may throw IOException
   */
  public Path getLinkTarget(final Path f) throws IOException {
    throw new AssertionError("Implementation Error: " + getClass()
        + " that threw an UnresolvedLinkException, causing this method to be"
        + " called, needs to override this method.");
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#setPermission(Path, FsPermission)} except that Path f
   * must be for this file system.
   */
  public abstract void setPermission(final Path f,
      final FsPermission permission) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException;
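  // Illustrative sketch (not part of this class): symlink support is opt-in,
  // so callers are expected to probe supportsSymlinks() before calling
  // createSymlink (the default implementation above simply throws). Paths
  // below are hypothetical; fs denotes an AbstractFileSystem instance.
  //
  //   if (fs.supportsSymlinks()) {
  //     fs.createSymlink(new Path("/data/current"),
  //         new Path("/user/alice/latest"), true /* createParent */);
  //   }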
  /**
   * The specification of this method matches that of
   * {@link FileContext#setOwner(Path, String, String)} except that Path f must
   * be for this file system.
   */
  public abstract void setOwner(final Path f, final String username,
      final String groupname) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#setTimes(Path, long, long)} except that Path f must be
   * for this file system.
   */
  public abstract void setTimes(final Path f, final long mtime,
      final long atime) throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFileChecksum(Path)} except that Path f must be for
   * this file system.
   */
  public abstract FileChecksum getFileChecksum(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFileStatus(Path)}
   * except that an UnresolvedLinkException may be thrown if a symlink is
   * encountered in the path.
   */
  public abstract FileStatus getFileStatus(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#access(Path, FsAction)}
   * except that an UnresolvedLinkException may be thrown if a symlink is
   * encountered in the path.
   */
  @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
  public void access(Path path, FsAction mode) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException {
    FileSystem.checkAccessPermissions(this.getFileStatus(path), mode);
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFileLinkStatus(Path)}
   * except that an UnresolvedLinkException may be thrown if a symlink is
   * encountered in the path leading up to the final path component.
   * If the file system does not support symlinks then the behavior is
   * equivalent to {@link AbstractFileSystem#getFileStatus(Path)}.
   */
  public FileStatus getFileLinkStatus(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnsupportedFileSystemException, IOException {
    return getFileStatus(f);
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFileBlockLocations(Path, long, long)} except that
   * Path f must be for this file system.
   */
  public abstract BlockLocation[] getFileBlockLocations(final Path f,
      final long start, final long len) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFsStatus(Path)} except that Path f must be for this
   * file system.
   */
  public FsStatus getFsStatus(final Path f) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException {
    // default impl gets FsStatus of root
    return getFsStatus();
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFsStatus(Path)}.
   */
  public abstract FsStatus getFsStatus() throws AccessControlException,
      FileNotFoundException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#listStatus(Path)} except that Path f must be for this
   * file system.
   */
  public RemoteIterator<FileStatus> listStatusIterator(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException {
    return new RemoteIterator<FileStatus>() {
      private int i = 0;
      private FileStatus[] statusList = listStatus(f);

      @Override
      public boolean hasNext() {
        return i < statusList.length;
      }

      @Override
      public FileStatus next() {
        if (!hasNext()) {
          throw new NoSuchElementException();
        }
        return statusList[i++];
      }
    };
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#listLocatedStatus(Path)} except that Path f
   * must be for this file system.
   */
  public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException {
    return new RemoteIterator<LocatedFileStatus>() {
      private RemoteIterator<FileStatus> itor = listStatusIterator(f);

      @Override
      public boolean hasNext() throws IOException {
        return itor.hasNext();
      }

      @Override
      public LocatedFileStatus next() throws IOException {
        if (!hasNext()) {
          throw new NoSuchElementException("No more entry in " + f);
        }
        FileStatus result = itor.next();
        BlockLocation[] locs = null;
        if (result.isFile()) {
          locs = getFileBlockLocations(
              result.getPath(), 0, result.getLen());
        }
        return new LocatedFileStatus(result, locs);
      }
    };
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext.Util#listStatus(Path)} except that Path f must be
   * for this file system.
   */
  public abstract FileStatus[] listStatus(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * @return an iterator over the corrupt files under the given path
   *         (may contain duplicates if a file has more than one corrupt block)
   * @throws IOException
   */
  public RemoteIterator<Path> listCorruptFileBlocks(Path path)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getCanonicalName() +
        " does not support" +
        " listCorruptFileBlocks");
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#setVerifyChecksum(boolean, Path)} except that Path f
   * must be for this file system.
   */
  public abstract void setVerifyChecksum(final boolean verifyChecksum)
      throws AccessControlException, IOException;

  /**
   * Get a canonical name for this file system.
   *
   * @return a URI string that uniquely identifies this file system
   */
  public String getCanonicalServiceName() {
    return SecurityUtil.buildDTServiceName(getUri(), getUriDefaultPort());
  }
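  // Illustrative sketch (not part of this class): consuming the remote
  // iterators returned above. The path below is hypothetical; fs denotes an
  // AbstractFileSystem instance.
  //
  //   RemoteIterator<LocatedFileStatus> it =
  //       fs.listLocatedStatus(new Path("/data/logs"));
  //   while (it.hasNext()) {
  //     LocatedFileStatus status = it.next();
  //     BlockLocation[] locs = status.getBlockLocations();
  //     System.out.println(status.getPath() + " -> "
  //         + (locs == null ? 0 : locs.length) + " block locations");
  //   }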
  /**
   * Get one or more delegation tokens associated with the filesystem. Normally
   * a file system returns a single delegation token. A file system that
   * manages multiple file systems underneath could return a set of delegation
   * tokens for all the file systems it manages.
   *
   * @param renewer the account name that is allowed to renew the token.
   * @return List of delegation tokens.
   *         If delegation tokens are not supported, then return a list of size
   *         zero.
   * @throws IOException
   */
  @InterfaceAudience.LimitedPrivate( { "HDFS", "MapReduce" })
  public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
    return new ArrayList<Token<?>>(0);
  }

  /**
   * Modifies ACL entries of files and directories.  This method can add new
   * ACL entries or modify the permissions on existing ACL entries.  All
   * existing ACL entries that are not specified in this call are retained
   * without changes.  (Modifications are merged into the current ACL.)
   *
   * @param path Path to modify
   * @param aclSpec List<AclEntry> describing modifications
   * @throws IOException if an ACL could not be modified
   */
  public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support modifyAclEntries");
  }

  /**
   * Removes ACL entries from files and directories.  Other ACL entries are
   * retained.
   *
   * @param path Path to modify
   * @param aclSpec List<AclEntry> describing entries to remove
   * @throws IOException if an ACL could not be modified
   */
  public void removeAclEntries(Path path, List<AclEntry> aclSpec)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support removeAclEntries");
  }

  /**
   * Removes all default ACL entries from files and directories.
   *
   * @param path Path to modify
   * @throws IOException if an ACL could not be modified
   */
  public void removeDefaultAcl(Path path)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support removeDefaultAcl");
  }

  /**
   * Removes all but the base ACL entries of files and directories.  The
   * entries for user, group, and others are retained for compatibility with
   * permission bits.
   *
   * @param path Path to modify
   * @throws IOException if an ACL could not be removed
   */
  public void removeAcl(Path path)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support removeAcl");
  }

  /**
   * Fully replaces the ACL of files and directories, discarding all existing
   * entries.
   *
   * @param path Path to modify
   * @param aclSpec List<AclEntry> describing modifications, must include
   *          entries for user, group, and others for compatibility with
   *          permission bits.
   * @throws IOException if an ACL could not be modified
   */
  public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support setAcl");
  }

  /**
   * Gets the ACLs of files and directories.
   *
   * @param path Path to get
   * @return AclStatus describing the ACL of the file or directory
   * @throws IOException if an ACL could not be read
   */
  public AclStatus getAclStatus(Path path) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getAclStatus");
  }
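  // Illustrative sketch (not part of this class): an ACL modification on a
  // file system that overrides the methods above (this base class only throws
  // UnsupportedOperationException). The path and user name are hypothetical;
  // AclEntry.Builder, AclEntryScope and AclEntryType come from
  // org.apache.hadoop.fs.permission, and Arrays from java.util.
  //
  //   AclEntry entry = new AclEntry.Builder()
  //       .setScope(AclEntryScope.ACCESS)
  //       .setType(AclEntryType.USER)
  //       .setName("bruce")
  //       .setPermission(FsAction.READ_EXECUTE)
  //       .build();
  //   fs.modifyAclEntries(new Path("/data/shared"), Arrays.asList(entry));
  //   AclStatus status = fs.getAclStatus(new Path("/data/shared"));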
  /**
   * Set an xattr of a file or directory.
   * The name must be prefixed with the namespace followed by ".". For example,
   * "user.attr".
   * <p/>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to modify
   * @param name xattr name.
   * @param value xattr value.
   * @throws IOException
   */
  public void setXAttr(Path path, String name, byte[] value)
      throws IOException {
    setXAttr(path, name, value, EnumSet.of(XAttrSetFlag.CREATE,
        XAttrSetFlag.REPLACE));
  }

  /**
   * Set an xattr of a file or directory.
   * The name must be prefixed with the namespace followed by ".". For example,
   * "user.attr".
   * <p/>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to modify
   * @param name xattr name.
   * @param value xattr value.
   * @param flag xattr set flag
   * @throws IOException
   */
  public void setXAttr(Path path, String name, byte[] value,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support setXAttr");
  }

  /**
   * Get an xattr for a file or directory.
   * The name must be prefixed with the namespace followed by ".". For example,
   * "user.attr".
   * <p/>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to get extended attribute
   * @param name xattr name.
   * @return byte[] xattr value.
   * @throws IOException
   */
  public byte[] getXAttr(Path path, String name) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getXAttr");
  }

  /**
   * Get all of the xattrs for a file or directory.
   * Only those xattrs for which the logged-in user has permissions to view
   * are returned.
   * <p/>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to get extended attributes
   * @return Map<String, byte[]> describing the XAttrs of the file or directory
   * @throws IOException
   */
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getXAttrs");
  }

  /**
   * Get all of the xattrs for a file or directory.
   * Only those xattrs for which the logged-in user has permissions to view
   * are returned.
   * <p/>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to get extended attributes
   * @param names XAttr names.
   * @return Map<String, byte[]> describing the XAttrs of the file or directory
   * @throws IOException
   */
  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getXAttrs");
  }
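  // Illustrative sketch (not part of this class): round-tripping an extended
  // attribute on a file system that supports xattrs. The path, attribute name
  // and value are hypothetical; fs denotes an AbstractFileSystem instance and
  // StandardCharsets comes from java.nio.charset.
  //
  //   Path p = new Path("/user/alice/report.csv");
  //   fs.setXAttr(p, "user.origin", "batch-42".getBytes(StandardCharsets.UTF_8));
  //   byte[] origin = fs.getXAttr(p, "user.origin");
  //   List<String> names = fs.listXAttrs(p);   // contains "user.origin"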
  /**
   * Get all of the xattr names for a file or directory.
   * Only the xattr names for which the logged-in user has permissions to view
   * are returned.
   * <p/>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to get extended attributes
   * @return List<String> of the XAttr names of the file or directory
   * @throws IOException
   */
  public List<String> listXAttrs(Path path)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support listXAttrs");
  }

  /**
   * Remove an xattr of a file or directory.
   * The name must be prefixed with the namespace followed by ".". For example,
   * "user.attr".
   * <p/>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to remove extended attribute
   * @param name xattr name
   * @throws IOException
   */
  public void removeXAttr(Path path, String name) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support removeXAttr");
  }

  @Override //Object
  public int hashCode() {
    return myUri.hashCode();
  }

  @Override //Object
  public boolean equals(Object other) {
    if (other == null || !(other instanceof AbstractFileSystem)) {
      return false;
    }
    return myUri.equals(((AbstractFileSystem) other).myUri);
  }
}