001/** 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.fs; 019 020import java.io.FileNotFoundException; 021import java.io.IOException; 022import java.lang.reflect.Constructor; 023import java.net.URI; 024import java.net.URISyntaxException; 025import java.util.ArrayList; 026import java.util.EnumSet; 027import java.util.HashMap; 028import java.util.List; 029import java.util.Map; 030import java.util.NoSuchElementException; 031import java.util.StringTokenizer; 032import java.util.concurrent.ConcurrentHashMap; 033 034import org.apache.commons.logging.Log; 035import org.apache.commons.logging.LogFactory; 036import org.apache.hadoop.HadoopIllegalArgumentException; 037import org.apache.hadoop.classification.InterfaceAudience; 038import org.apache.hadoop.classification.InterfaceStability; 039import org.apache.hadoop.conf.Configuration; 040import org.apache.hadoop.fs.FileSystem.Statistics; 041import org.apache.hadoop.fs.Options.ChecksumOpt; 042import org.apache.hadoop.fs.Options.CreateOpts; 043import org.apache.hadoop.fs.Options.Rename; 044import org.apache.hadoop.fs.permission.AclEntry; 045import org.apache.hadoop.fs.permission.AclStatus; 046import 
org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;

import com.google.common.annotations.VisibleForTesting;

/**
 * This class provides an interface for implementors of a Hadoop file system
 * (analogous to the VFS of Unix). Applications do not access this class;
 * instead they access files across all file systems using {@link FileContext}.
 *
 * Pathnames passed to AbstractFileSystem can be fully qualified URI that
 * matches the "this" file system (ie same scheme and authority)
 * or a slash-relative name that is assumed to be relative
 * to the root of the "this" file system.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving /* Evolving for a release, to be changed to Stable */
public abstract class AbstractFileSystem {
  static final Log LOG = LogFactory.getLog(AbstractFileSystem.class);

  /** Recording statistics per a file system class. */
  private static final Map<URI, Statistics>
      STATISTICS_TABLE = new HashMap<URI, Statistics>();

  /** Cache of constructors for each file system class. */
  private static final Map<Class<?>, Constructor<?>> CONSTRUCTOR_CACHE =
      new ConcurrentHashMap<Class<?>, Constructor<?>>();

  /** Constructor parameter types (URI, Configuration) looked up reflectively. */
  private static final Class<?>[] URI_CONFIG_ARGS =
      new Class[]{URI.class, Configuration.class};

  /** The statistics for this file system.
 */
  protected Statistics statistics;

  @VisibleForTesting
  static final String NO_ABSTRACT_FS_ERROR = "No AbstractFileSystem configured for scheme";

  /** URI identifying this file system (scheme and authority only); set once
   *  by the constructor via {@code getUri(...)}. */
  private final URI myUri;

  /** @return the statistics object recording operations on this file system. */
  public Statistics getStatistics() {
    return statistics;
  }

  /**
   * Returns true if the specified string is considered valid in the path part
   * of a URI by this file system. The default implementation enforces the rules
   * of HDFS, but subclasses may override this method to implement specific
   * validation rules for specific file systems.
   *
   * @param src String source filename to check, path part of the URI
   * @return boolean true if the specified string is considered valid
   */
  public boolean isValidName(String src) {
    // Prohibit ".." "." and anything containing ":"
    StringTokenizer tokens = new StringTokenizer(src, Path.SEPARATOR);
    while(tokens.hasMoreTokens()) {
      String element = tokens.nextToken();
      if (element.equals("..") ||
          element.equals(".") ||
          (element.indexOf(":") >= 0)) {
        return false;
      }
    }
    return true;
  }

  /**
   * Create an object for the given class and initialize it from conf.
   * @param theClass class of which an object is created
   * @param uri URI passed to the class's (URI, Configuration) constructor
   * @param conf Configuration
   * @return a new object
   */
  @SuppressWarnings("unchecked")
  static <T> T newInstance(Class<T> theClass,
      URI uri, Configuration conf) {
    T result;
    try {
      // Cache the (URI, Configuration) constructor so repeated instantiations
      // of the same file system class skip the reflective lookup.
      Constructor<T> meth = (Constructor<T>) CONSTRUCTOR_CACHE.get(theClass);
      if (meth == null) {
        meth = theClass.getDeclaredConstructor(URI_CONFIG_ARGS);
        meth.setAccessible(true);
        CONSTRUCTOR_CACHE.put(theClass, meth);
      }
      result = meth.newInstance(uri, conf);
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
    return result;
  }

  /**
   * Create a file system instance for the specified uri using the conf.
   * The conf is used to find the class name that implements the file system.
   * The conf is also passed to the file system for its configuration.
   *
   * @param uri URI of the file system
   * @param conf Configuration for the file system
   *
   * @return Returns the file system for the given URI
   *
   * @throws UnsupportedFileSystemException file system for <code>uri</code> is
   *           not found
   */
  public static AbstractFileSystem createFileSystem(URI uri, Configuration conf)
      throws UnsupportedFileSystemException {
    // Implementation class is named by a per-scheme property, e.g.
    // "fs.AbstractFileSystem.hdfs.impl" for hdfs:// URIs.
    final String fsImplConf = String.format("fs.AbstractFileSystem.%s.impl",
        uri.getScheme());

    Class<?> clazz = conf.getClass(fsImplConf, null);
    if (clazz == null) {
      throw new UnsupportedFileSystemException(String.format(
          "%s=null: %s: %s",
          fsImplConf, NO_ABSTRACT_FS_ERROR, uri.getScheme()));
    }
    return (AbstractFileSystem) newInstance(clazz, uri, conf);
  }

  /**
   * Get the statistics for a particular file system.
   *
   * @param uri
   *          used as key to lookup STATISTICS_TABLE. Only scheme and authority
   *          part of the uri are used.
173 * @return a statistics object 174 */ 175 protected static synchronized Statistics getStatistics(URI uri) { 176 String scheme = uri.getScheme(); 177 if (scheme == null) { 178 throw new IllegalArgumentException("Scheme not defined in the uri: " 179 + uri); 180 } 181 URI baseUri = getBaseUri(uri); 182 Statistics result = STATISTICS_TABLE.get(baseUri); 183 if (result == null) { 184 result = new Statistics(scheme); 185 STATISTICS_TABLE.put(baseUri, result); 186 } 187 return result; 188 } 189 190 private static URI getBaseUri(URI uri) { 191 String scheme = uri.getScheme(); 192 String authority = uri.getAuthority(); 193 String baseUriString = scheme + "://"; 194 if (authority != null) { 195 baseUriString = baseUriString + authority; 196 } else { 197 baseUriString = baseUriString + "/"; 198 } 199 return URI.create(baseUriString); 200 } 201 202 public static synchronized void clearStatistics() { 203 for(Statistics stat: STATISTICS_TABLE.values()) { 204 stat.reset(); 205 } 206 } 207 208 /** 209 * Prints statistics for all file systems. 210 */ 211 public static synchronized void printStatistics() { 212 for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) { 213 System.out.println(" FileSystem " + pair.getKey().getScheme() + "://" 214 + pair.getKey().getAuthority() + ": " + pair.getValue()); 215 } 216 } 217 218 protected static synchronized Map<URI, Statistics> getAllStatistics() { 219 Map<URI, Statistics> statsMap = new HashMap<URI, Statistics>( 220 STATISTICS_TABLE.size()); 221 for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) { 222 URI key = pair.getKey(); 223 Statistics value = pair.getValue(); 224 Statistics newStatsObj = new Statistics(value); 225 statsMap.put(URI.create(key.toString()), newStatsObj); 226 } 227 return statsMap; 228 } 229 230 /** 231 * The main factory method for creating a file system. Get a file system for 232 * the URI's scheme and authority. 
   * The scheme of the <code>uri</code> determines a configuration property
   * name, <tt>fs.AbstractFileSystem.<i>scheme</i>.impl</tt> whose value names
   * the AbstractFileSystem class.
   *
   * The entire URI and conf is passed to the AbstractFileSystem factory method.
   *
   * @param uri for the file system to be created.
   * @param conf which is passed to the file system impl.
   *
   * @return file system for the given URI.
   *
   * @throws UnsupportedFileSystemException if the file system for
   *           <code>uri</code> is not supported.
   */
  public static AbstractFileSystem get(final URI uri, final Configuration conf)
      throws UnsupportedFileSystemException {
    return createFileSystem(uri, conf);
  }

  /**
   * Constructor to be called by subclasses.
   *
   * @param uri for this file system.
   * @param supportedScheme the scheme supported by the implementor
   * @param authorityNeeded if true then the URI must have authority, if false
   *          then the URI must have null authority.
   * @param defaultPort the file system's default port; -1 if it has none
   *
   * @throws URISyntaxException <code>uri</code> has syntax error
   */
  public AbstractFileSystem(final URI uri, final String supportedScheme,
      final boolean authorityNeeded, final int defaultPort)
      throws URISyntaxException {
    // Validate/normalize first; statistics are keyed by the uri's
    // scheme and authority.
    myUri = getUri(uri, supportedScheme, authorityNeeded, defaultPort);
    statistics = getStatistics(uri);
  }

  /**
   * Check that the Uri's scheme matches.
   * @param uri the URI to check
   * @param supportedScheme the scheme this file system supports
   * @throws HadoopIllegalArgumentException if the scheme is missing or does
   *           not match <code>supportedScheme</code>
   */
  public void checkScheme(URI uri, String supportedScheme) {
    String scheme = uri.getScheme();
    if (scheme == null) {
      throw new HadoopIllegalArgumentException("Uri without scheme: " + uri);
    }
    if (!scheme.equals(supportedScheme)) {
      throw new HadoopIllegalArgumentException("Uri scheme " + uri
          + " does not match the scheme " + supportedScheme);
    }
  }

  /**
   * Get the URI for the file system based on the given URI.
   * The path, query part of the given URI is stripped out and default file
   * system port is used to form the URI.
   *
   * @param uri FileSystem URI.
   * @param supportedScheme the scheme this file system supports
   * @param authorityNeeded if true authority cannot be null in the URI. If
   *          false authority must be null.
   * @param defaultPort default port to use if port is not specified in the URI.
   *
   * @return URI of the file system
   *
   * @throws URISyntaxException <code>uri</code> has syntax error
   */
  private URI getUri(URI uri, String supportedScheme,
      boolean authorityNeeded, int defaultPort) throws URISyntaxException {
    checkScheme(uri, supportedScheme);
    // A file system implementation that requires authority must always
    // specify default port
    if (defaultPort < 0 && authorityNeeded) {
      throw new HadoopIllegalArgumentException(
          "FileSystem implementation error - default port " + defaultPort
              + " is not valid");
    }
    String authority = uri.getAuthority();
    if (authority == null) {
      if (authorityNeeded) {
        throw new HadoopIllegalArgumentException("Uri without authority: " + uri);
      } else {
        // No authority: canonical form is scheme:///
        return new URI(supportedScheme + ":///");
      }
    }
    // authority is non null - AuthorityNeeded may be true or false.
    int port = uri.getPort();
    port = (port == -1 ? defaultPort : port);
    if (port == -1) { // no port supplied and default port is not specified
      return new URI(supportedScheme, authority, "/", null);
    }
    return new URI(supportedScheme + "://" + uri.getHost() + ":" + port);
  }

  /**
   * The default port of this file system.
   *
   * @return default port of this file system's Uri scheme
   *         A uri with a port of -1 => default port;
   */
  public abstract int getUriDefaultPort();

  /**
   * Returns a URI whose scheme and authority identify this FileSystem.
   *
   * @return the uri of this file system.
   */
  public URI getUri() {
    return myUri;
  }

  /**
   * Check that a Path belongs to this FileSystem.
   *
   * If the path is fully qualified URI, then its scheme and authority
   * matches that of this file system. Otherwise the path must be
   * slash-relative name.
   *
   * @param path the path to check
   * @throws InvalidPathException if the path is invalid
   */
  public void checkPath(Path path) {
    URI uri = path.toUri();
    String thatScheme = uri.getScheme();
    String thatAuthority = uri.getAuthority();
    if (thatScheme == null) {
      if (thatAuthority == null) {
        // Scheme-less, authority-less: must be a slash-relative name.
        if (path.isUriPathAbsolute()) {
          return;
        }
        throw new InvalidPathException("relative paths not allowed:" +
            path);
      } else {
        throw new InvalidPathException(
            "Path without scheme with non-null authority:" + path);
      }
    }
    String thisScheme = this.getUri().getScheme();
    String thisHost = this.getUri().getHost();
    String thatHost = uri.getHost();

    // Schemes and hosts must match.
    // Allow for null Authority for file:///
    if (!thisScheme.equalsIgnoreCase(thatScheme) ||
        (thisHost != null &&
            !thisHost.equalsIgnoreCase(thatHost)) ||
        (thisHost == null && thatHost != null)) {
      throw new InvalidPathException("Wrong FS: " + path + ", expected: "
          + this.getUri());
    }

    // Ports must match, unless this FS instance is using the default port, in
    // which case the port may be omitted from the given URI
    int thisPort = this.getUri().getPort();
    int thatPort = uri.getPort();
    if (thatPort == -1) { // -1 => defaultPort of Uri scheme
      thatPort = this.getUriDefaultPort();
    }
    if (thisPort != thatPort) {
      throw new InvalidPathException("Wrong FS: " + path + ", expected: "
          + this.getUri());
    }
  }

  /**
   * Get the path-part of a pathname. Checks that URI matches this file system
   * and that the path-part is a valid name.
398 * 399 * @param p path 400 * 401 * @return path-part of the Path p 402 */ 403 public String getUriPath(final Path p) { 404 checkPath(p); 405 String s = p.toUri().getPath(); 406 if (!isValidName(s)) { 407 throw new InvalidPathException("Path part " + s + " from URI " + p 408 + " is not a valid filename."); 409 } 410 return s; 411 } 412 413 /** 414 * Make the path fully qualified to this file system 415 * @param path 416 * @return the qualified path 417 */ 418 public Path makeQualified(Path path) { 419 checkPath(path); 420 return path.makeQualified(this.getUri(), null); 421 } 422 423 /** 424 * Some file systems like LocalFileSystem have an initial workingDir 425 * that is used as the starting workingDir. For other file systems 426 * like HDFS there is no built in notion of an initial workingDir. 427 * 428 * @return the initial workingDir if the file system has such a notion 429 * otherwise return a null. 430 */ 431 public Path getInitialWorkingDirectory() { 432 return null; 433 } 434 435 /** 436 * Return the current user's home directory in this file system. 437 * The default implementation returns "/user/$USER/". 438 * 439 * @return current user's home directory. 440 */ 441 public Path getHomeDirectory() { 442 return new Path("/user/"+System.getProperty("user.name")).makeQualified( 443 getUri(), null); 444 } 445 446 /** 447 * Return a set of server default configuration values. 448 * 449 * @return server default configuration values 450 * 451 * @throws IOException an I/O error occurred 452 * @deprecated use {@link #getServerDefaults(Path)} instead 453 */ 454 @Deprecated 455 public abstract FsServerDefaults getServerDefaults() throws IOException; 456 457 /** 458 * Return a set of server default configuration values based on path. 
459 * @param f path to fetch server defaults 460 * @return server default configuration values for path 461 * @throws IOException an I/O error occurred 462 */ 463 public FsServerDefaults getServerDefaults(final Path f) throws IOException { 464 return getServerDefaults(); 465 } 466 467 /** 468 * Return the fully-qualified path of path f resolving the path 469 * through any internal symlinks or mount point 470 * @param p path to be resolved 471 * @return fully qualified path 472 * @throws FileNotFoundException, AccessControlException, IOException 473 * UnresolvedLinkException if symbolic link on path cannot be resolved 474 * internally 475 */ 476 public Path resolvePath(final Path p) throws FileNotFoundException, 477 UnresolvedLinkException, AccessControlException, IOException { 478 checkPath(p); 479 return getFileStatus(p).getPath(); // default impl is to return the path 480 } 481 482 /** 483 * The specification of this method matches that of 484 * {@link FileContext#create(Path, EnumSet, Options.CreateOpts...)} except 485 * that the Path f must be fully qualified and the permission is absolute 486 * (i.e. umask has been applied). 487 */ 488 public final FSDataOutputStream create(final Path f, 489 final EnumSet<CreateFlag> createFlag, Options.CreateOpts... 
opts) 490 throws AccessControlException, FileAlreadyExistsException, 491 FileNotFoundException, ParentNotDirectoryException, 492 UnsupportedFileSystemException, UnresolvedLinkException, IOException { 493 checkPath(f); 494 int bufferSize = -1; 495 short replication = -1; 496 long blockSize = -1; 497 int bytesPerChecksum = -1; 498 ChecksumOpt checksumOpt = null; 499 FsPermission permission = null; 500 Progressable progress = null; 501 Boolean createParent = null; 502 503 for (CreateOpts iOpt : opts) { 504 if (CreateOpts.BlockSize.class.isInstance(iOpt)) { 505 if (blockSize != -1) { 506 throw new HadoopIllegalArgumentException( 507 "BlockSize option is set multiple times"); 508 } 509 blockSize = ((CreateOpts.BlockSize) iOpt).getValue(); 510 } else if (CreateOpts.BufferSize.class.isInstance(iOpt)) { 511 if (bufferSize != -1) { 512 throw new HadoopIllegalArgumentException( 513 "BufferSize option is set multiple times"); 514 } 515 bufferSize = ((CreateOpts.BufferSize) iOpt).getValue(); 516 } else if (CreateOpts.ReplicationFactor.class.isInstance(iOpt)) { 517 if (replication != -1) { 518 throw new HadoopIllegalArgumentException( 519 "ReplicationFactor option is set multiple times"); 520 } 521 replication = ((CreateOpts.ReplicationFactor) iOpt).getValue(); 522 } else if (CreateOpts.BytesPerChecksum.class.isInstance(iOpt)) { 523 if (bytesPerChecksum != -1) { 524 throw new HadoopIllegalArgumentException( 525 "BytesPerChecksum option is set multiple times"); 526 } 527 bytesPerChecksum = ((CreateOpts.BytesPerChecksum) iOpt).getValue(); 528 } else if (CreateOpts.ChecksumParam.class.isInstance(iOpt)) { 529 if (checksumOpt != null) { 530 throw new HadoopIllegalArgumentException( 531 "CreateChecksumType option is set multiple times"); 532 } 533 checksumOpt = ((CreateOpts.ChecksumParam) iOpt).getValue(); 534 } else if (CreateOpts.Perms.class.isInstance(iOpt)) { 535 if (permission != null) { 536 throw new HadoopIllegalArgumentException( 537 "Perms option is set multiple times"); 538 
} 539 permission = ((CreateOpts.Perms) iOpt).getValue(); 540 } else if (CreateOpts.Progress.class.isInstance(iOpt)) { 541 if (progress != null) { 542 throw new HadoopIllegalArgumentException( 543 "Progress option is set multiple times"); 544 } 545 progress = ((CreateOpts.Progress) iOpt).getValue(); 546 } else if (CreateOpts.CreateParent.class.isInstance(iOpt)) { 547 if (createParent != null) { 548 throw new HadoopIllegalArgumentException( 549 "CreateParent option is set multiple times"); 550 } 551 createParent = ((CreateOpts.CreateParent) iOpt).getValue(); 552 } else { 553 throw new HadoopIllegalArgumentException("Unkown CreateOpts of type " + 554 iOpt.getClass().getName()); 555 } 556 } 557 if (permission == null) { 558 throw new HadoopIllegalArgumentException("no permission supplied"); 559 } 560 561 562 FsServerDefaults ssDef = getServerDefaults(f); 563 if (ssDef.getBlockSize() % ssDef.getBytesPerChecksum() != 0) { 564 throw new IOException("Internal error: default blockSize is" + 565 " not a multiple of default bytesPerChecksum "); 566 } 567 568 if (blockSize == -1) { 569 blockSize = ssDef.getBlockSize(); 570 } 571 572 // Create a checksum option honoring user input as much as possible. 573 // If bytesPerChecksum is specified, it will override the one set in 574 // checksumOpt. Any missing value will be filled in using the default. 
575 ChecksumOpt defaultOpt = new ChecksumOpt( 576 ssDef.getChecksumType(), 577 ssDef.getBytesPerChecksum()); 578 checksumOpt = ChecksumOpt.processChecksumOpt(defaultOpt, 579 checksumOpt, bytesPerChecksum); 580 581 if (bufferSize == -1) { 582 bufferSize = ssDef.getFileBufferSize(); 583 } 584 if (replication == -1) { 585 replication = ssDef.getReplication(); 586 } 587 if (createParent == null) { 588 createParent = false; 589 } 590 591 if (blockSize % bytesPerChecksum != 0) { 592 throw new HadoopIllegalArgumentException( 593 "blockSize should be a multiple of checksumsize"); 594 } 595 596 return this.createInternal(f, createFlag, permission, bufferSize, 597 replication, blockSize, progress, checksumOpt, createParent); 598 } 599 600 /** 601 * The specification of this method matches that of 602 * {@link #create(Path, EnumSet, Options.CreateOpts...)} except that the opts 603 * have been declared explicitly. 604 */ 605 public abstract FSDataOutputStream createInternal(Path f, 606 EnumSet<CreateFlag> flag, FsPermission absolutePermission, 607 int bufferSize, short replication, long blockSize, Progressable progress, 608 ChecksumOpt checksumOpt, boolean createParent) 609 throws AccessControlException, FileAlreadyExistsException, 610 FileNotFoundException, ParentNotDirectoryException, 611 UnsupportedFileSystemException, UnresolvedLinkException, IOException; 612 613 /** 614 * The specification of this method matches that of 615 * {@link FileContext#mkdir(Path, FsPermission, boolean)} except that the Path 616 * f must be fully qualified and the permission is absolute (i.e. 617 * umask has been applied). 
   */
  public abstract void mkdir(final Path dir, final FsPermission permission,
      final boolean createParent) throws AccessControlException,
      FileAlreadyExistsException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#delete(Path, boolean)} except that Path f must be for
   * this file system.
   */
  public abstract boolean delete(final Path f, final boolean recursive)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#open(Path)} except that Path f must be for this
   * file system.
   */
  public FSDataInputStream open(final Path f) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException {
    // Delegate using the server-default buffer size.
    return open(f, getServerDefaults(f).getFileBufferSize());
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#open(Path, int)} except that Path f must be for this
   * file system.
   */
  public abstract FSDataInputStream open(final Path f, int bufferSize)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#truncate(Path, long)} except that Path f must be for
   * this file system.
   */
  public boolean truncate(Path f, long newLength)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException {
    // Optional operation: file systems supporting truncate override this.
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support truncate");
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#setReplication(Path, short)} except that Path f must be
   * for this file system.
668 */ 669 public abstract boolean setReplication(final Path f, 670 final short replication) throws AccessControlException, 671 FileNotFoundException, UnresolvedLinkException, IOException; 672 673 /** 674 * The specification of this method matches that of 675 * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path 676 * f must be for this file system. 677 */ 678 public final void rename(final Path src, final Path dst, 679 final Options.Rename... options) throws AccessControlException, 680 FileAlreadyExistsException, FileNotFoundException, 681 ParentNotDirectoryException, UnresolvedLinkException, IOException { 682 boolean overwrite = false; 683 if (null != options) { 684 for (Rename option : options) { 685 if (option == Rename.OVERWRITE) { 686 overwrite = true; 687 } 688 } 689 } 690 renameInternal(src, dst, overwrite); 691 } 692 693 /** 694 * The specification of this method matches that of 695 * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path 696 * f must be for this file system and NO OVERWRITE is performed. 697 * 698 * File systems that do not have a built in overwrite need implement only this 699 * method and can take advantage of the default impl of the other 700 * {@link #renameInternal(Path, Path, boolean)} 701 */ 702 public abstract void renameInternal(final Path src, final Path dst) 703 throws AccessControlException, FileAlreadyExistsException, 704 FileNotFoundException, ParentNotDirectoryException, 705 UnresolvedLinkException, IOException; 706 707 /** 708 * The specification of this method matches that of 709 * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path 710 * f must be for this file system. 
   */
  public void renameInternal(final Path src, final Path dst,
      boolean overwrite) throws AccessControlException,
      FileAlreadyExistsException, FileNotFoundException,
      ParentNotDirectoryException, UnresolvedLinkException, IOException {
    // Default implementation deals with overwrite in a non-atomic way
    final FileStatus srcStatus = getFileLinkStatus(src);

    FileStatus dstStatus;
    try {
      dstStatus = getFileLinkStatus(dst);
    } catch (IOException e) {
      // Destination could not be stat-ed; treat as non-existent.
      dstStatus = null;
    }
    if (dstStatus != null) {
      if (dst.equals(src)) {
        throw new FileAlreadyExistsException(
            "The source "+src+" and destination "+dst+" are the same");
      }
      if (srcStatus.isSymlink() && dst.equals(srcStatus.getSymlink())) {
        throw new FileAlreadyExistsException(
            "Cannot rename symlink "+src+" to its target "+dst);
      }
      // It's OK to rename a file to a symlink and vice versa
      if (srcStatus.isDirectory() != dstStatus.isDirectory()) {
        throw new IOException("Source " + src + " and destination " + dst
            + " must both be directories");
      }
      if (!overwrite) {
        throw new FileAlreadyExistsException("Rename destination " + dst
            + " already exists.");
      }
      // Delete the destination that is a file or an empty directory
      if (dstStatus.isDirectory()) {
        RemoteIterator<FileStatus> list = listStatusIterator(dst);
        if (list != null && list.hasNext()) {
          throw new IOException(
              "Rename cannot overwrite non empty destination directory " + dst);
        }
      }
      delete(dst, false);
    } else {
      // Destination absent: its parent must exist and must be a directory.
      final Path parent = dst.getParent();
      final FileStatus parentStatus = getFileStatus(parent);
      if (parentStatus.isFile()) {
        throw new ParentNotDirectoryException("Rename destination parent "
            + parent + " is a file.");
      }
    }
    renameInternal(src, dst);
  }

  /**
   * Returns true if the file system supports symlinks, false otherwise.
   * @return true if filesystem supports symlinks
   */
  public boolean supportsSymlinks() {
    // Default: no symlink support; subclasses that support symlinks override.
    return false;
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#createSymlink(Path, Path, boolean)};
   */
  public void createSymlink(final Path target, final Path link,
      final boolean createParent) throws IOException, UnresolvedLinkException {
    throw new IOException("File system does not support symlinks");
  }

  /**
   * Partially resolves the path. This is used during symlink resolution in
   * {@link FSLinkResolver}, and differs from the similarly named method
   * {@link FileContext#getLinkTarget(Path)}.
   * @throws IOException subclass implementations may throw IOException
   */
  public Path getLinkTarget(final Path f) throws IOException {
    // Reaching here means a subclass threw UnresolvedLinkException without
    // overriding this method - a contract violation, hence AssertionError.
    throw new AssertionError("Implementation Error: " + getClass()
        + " that threw an UnresolvedLinkException, causing this method to be"
        + " called, needs to override this method.");
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#setPermission(Path, FsPermission)} except that Path f
   * must be for this file system.
   */
  public abstract void setPermission(final Path f,
      final FsPermission permission) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#setOwner(Path, String, String)} except that Path f must
   * be for this file system.
   */
  public abstract void setOwner(final Path f, final String username,
      final String groupname) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#setTimes(Path, long, long)} except that Path f must be
   * for this file system.
   */
  public abstract void setTimes(final Path f, final long mtime,
      final long atime) throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFileChecksum(Path)} except that Path f must be for
   * this file system.
   */
  public abstract FileChecksum getFileChecksum(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFileStatus(Path)}
   * except that an UnresolvedLinkException may be thrown if a symlink is
   * encountered in the path.
   */
  public abstract FileStatus getFileStatus(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#access(Path, FsAction)}
   * except that an UnresolvedLinkException may be thrown if a symlink is
   * encountered in the path.
   */
  @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
  public void access(Path path, FsAction mode) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException {
    // Default: check the requested mode against the file's permission bits.
    FileSystem.checkAccessPermissions(this.getFileStatus(path), mode);
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFileLinkStatus(Path)}
   * except that an UnresolvedLinkException may be thrown if a symlink is
   * encountered in the path leading up to the final path component.
   * If the file system does not support symlinks then the behavior is
   * equivalent to {@link AbstractFileSystem#getFileStatus(Path)}.
   */
  public FileStatus getFileLinkStatus(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnsupportedFileSystemException, IOException {
    // Default: no symlink support, so link status equals file status.
    return getFileStatus(f);
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFileBlockLocations(Path, long, long)} except that
   * Path f must be for this file system.
   */
  public abstract BlockLocation[] getFileBlockLocations(final Path f,
      final long start, final long len) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFsStatus(Path)} except that Path f must be for this
   * file system.
   */
  public FsStatus getFsStatus(final Path f) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException {
    // default impl gets FsStatus of root
    return getFsStatus();
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFsStatus(Path)}.
   */
  public abstract FsStatus getFsStatus() throws AccessControlException,
      FileNotFoundException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#listStatus(Path)} except that Path f must be for this
   * file system.
895 */ 896 public RemoteIterator<FileStatus> listStatusIterator(final Path f) 897 throws AccessControlException, FileNotFoundException, 898 UnresolvedLinkException, IOException { 899 return new RemoteIterator<FileStatus>() { 900 private int i = 0; 901 private FileStatus[] statusList = listStatus(f); 902 903 @Override 904 public boolean hasNext() { 905 return i < statusList.length; 906 } 907 908 @Override 909 public FileStatus next() { 910 if (!hasNext()) { 911 throw new NoSuchElementException(); 912 } 913 return statusList[i++]; 914 } 915 }; 916 } 917 918 /** 919 * The specification of this method matches that of 920 * {@link FileContext#listLocatedStatus(Path)} except that Path f 921 * must be for this file system. 922 */ 923 public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f) 924 throws AccessControlException, FileNotFoundException, 925 UnresolvedLinkException, IOException { 926 return new RemoteIterator<LocatedFileStatus>() { 927 private RemoteIterator<FileStatus> itor = listStatusIterator(f); 928 929 @Override 930 public boolean hasNext() throws IOException { 931 return itor.hasNext(); 932 } 933 934 @Override 935 public LocatedFileStatus next() throws IOException { 936 if (!hasNext()) { 937 throw new NoSuchElementException("No more entry in " + f); 938 } 939 FileStatus result = itor.next(); 940 BlockLocation[] locs = null; 941 if (result.isFile()) { 942 locs = getFileBlockLocations( 943 result.getPath(), 0, result.getLen()); 944 } 945 return new LocatedFileStatus(result, locs); 946 } 947 }; 948 } 949 950 /** 951 * The specification of this method matches that of 952 * {@link FileContext.Util#listStatus(Path)} except that Path f must be 953 * for this file system. 
   */
  public abstract FileStatus[] listStatus(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * @return an iterator over the corrupt files under the given path
   * (may contain duplicates if a file has more than one corrupt block)
   * @throws IOException
   */
  public RemoteIterator<Path> listCorruptFileBlocks(Path path)
    throws IOException {
    // Optional operation: only file systems that track corrupt blocks
    // (e.g. HDFS) override this.
    throw new UnsupportedOperationException(getClass().getCanonicalName() +
                                            " does not support" +
                                            " listCorruptFileBlocks");
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#setVerifyChecksum(boolean, Path)} except that Path f
   * must be for this file system.
   */
  public abstract void setVerifyChecksum(final boolean verifyChecksum)
      throws AccessControlException, IOException;

  /**
   * Get a canonical name for this file system.
   * @return a URI string that uniquely identifies this file system
   */
  public String getCanonicalServiceName() {
    // Built from this file system's URI and default port; used as the
    // service key for delegation tokens.
    return SecurityUtil.buildDTServiceName(getUri(), getUriDefaultPort());
  }

  /**
   * Get one or more delegation tokens associated with the filesystem. Normally
   * a file system returns a single delegation token. A file system that manages
   * multiple file systems underneath, could return set of delegation tokens for
   * all the file systems it manages.
   *
   * @param renewer the account name that is allowed to renew the token.
   * @return List of delegation tokens.
   *   If delegation tokens not supported then return a list of size zero.
   * @throws IOException
   */
  @InterfaceAudience.LimitedPrivate( { "HDFS", "MapReduce" })
  public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
    // Default: delegation tokens are not supported -- return an empty list.
    return new ArrayList<Token<?>>(0);
  }

  /**
   * Modifies ACL entries of files and directories.
This method can add new ACL
   * entries or modify the permissions on existing ACL entries. All existing
   * ACL entries that are not specified in this call are retained without
   * changes. (Modifications are merged into the current ACL.)
   *
   * @param path Path to modify
   * @param aclSpec {@literal List<AclEntry>} describing modifications
   * @throws IOException if an ACL could not be modified
   */
  public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
      throws IOException {
    // Optional operation: file systems with ACL support override this.
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support modifyAclEntries");
  }

  /**
   * Removes ACL entries from files and directories. Other ACL entries are
   * retained.
   *
   * @param path Path to modify
   * @param aclSpec {@literal List<AclEntry>} describing entries to remove
   * @throws IOException if an ACL could not be modified
   */
  public void removeAclEntries(Path path, List<AclEntry> aclSpec)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support removeAclEntries");
  }

  /**
   * Removes all default ACL entries from files and directories.
   *
   * @param path Path to modify
   * @throws IOException if an ACL could not be modified
   */
  public void removeDefaultAcl(Path path)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support removeDefaultAcl");
  }

  /**
   * Removes all but the base ACL entries of files and directories. The entries
   * for user, group, and others are retained for compatibility with permission
   * bits.
   *
   * @param path Path to modify
   * @throws IOException if an ACL could not be removed
   */
  public void removeAcl(Path path)
      throws IOException {
    // Optional operation: file systems with ACL support override this.
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support removeAcl");
  }

  /**
   * Fully replaces ACL of files and directories, discarding all existing
   * entries.
   *
   * @param path Path to modify
   * @param aclSpec {@literal List<AclEntry>} describing modifications, must
   *   include entries for user, group, and others for compatibility with
   *   permission bits.
   * @throws IOException if an ACL could not be modified
   */
  public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support setAcl");
  }

  /**
   * Gets the ACLs of files and directories.
   *
   * @param path Path to get
   * @return RemoteIterator&lt;AclStatus&gt; which returns each AclStatus
   * @throws IOException if an ACL could not be read
   */
  public AclStatus getAclStatus(Path path) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getAclStatus");
  }

  /**
   * Set an xattr of a file or directory.
   * The name must be prefixed with the namespace followed by ".". For example,
   * "user.attr".
   * <p>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to modify
   * @param name xattr name.
   * @param value xattr value.
   * @throws IOException
   */
  public void setXAttr(Path path, String name, byte[] value)
      throws IOException {
    // Default flag set: create the xattr if absent, replace it if present.
    setXAttr(path, name, value, EnumSet.of(XAttrSetFlag.CREATE,
        XAttrSetFlag.REPLACE));
  }

  /**
   * Set an xattr of a file or directory.
   * The name must be prefixed with the namespace followed by ".".
For example,
   * "user.attr".
   * <p>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to modify
   * @param name xattr name.
   * @param value xattr value.
   * @param flag xattr set flag
   * @throws IOException
   */
  public void setXAttr(Path path, String name, byte[] value,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    // Optional operation: file systems with xattr support override this.
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support setXAttr");
  }

  /**
   * Get an xattr for a file or directory.
   * The name must be prefixed with the namespace followed by ".". For example,
   * "user.attr".
   * <p>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to get extended attribute
   * @param name xattr name.
   * @return byte[] xattr value.
   * @throws IOException
   */
  public byte[] getXAttr(Path path, String name) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getXAttr");
  }

  /**
   * Get all of the xattrs for a file or directory.
   * Only those xattrs for which the logged-in user has permissions to view
   * are returned.
   * <p>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to get extended attributes
   * @return {@literal Map<String, byte[]>} describing the XAttrs of the file
   *   or directory
   * @throws IOException
   */
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getXAttrs");
  }

  /**
   * Get all of the xattrs for a file or directory.
   * Only those xattrs for which the logged-in user has permissions to view
   * are returned.
   * <p>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to get extended attributes
   * @param names XAttr names.
   * @return {@literal Map<String, byte[]>} describing the XAttrs of the file
   *   or directory
   * @throws IOException
   */
  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
      throws IOException {
    // Optional operation: file systems with xattr support override this.
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getXAttrs");
  }

  /**
   * Get all of the xattr names for a file or directory.
   * Only the xattr names for which the logged-in user has permissions to view
   * are returned.
   * <p>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to get extended attributes
   * @return {@literal List<String>} of the XAttr names of the file or
   *   directory
   * @throws IOException
   */
  public List<String> listXAttrs(Path path)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support listXAttrs");
  }

  /**
   * Remove an xattr of a file or directory.
   * The name must be prefixed with the namespace followed by ".". For example,
   * "user.attr".
   * <p>
   * Refer to the HDFS extended attributes user documentation for details.
1196 * 1197 * @param path Path to remove extended attribute 1198 * @param name xattr name 1199 * @throws IOException 1200 */ 1201 public void removeXAttr(Path path, String name) throws IOException { 1202 throw new UnsupportedOperationException(getClass().getSimpleName() 1203 + " doesn't support removeXAttr"); 1204 } 1205 1206 @Override //Object 1207 public int hashCode() { 1208 return myUri.hashCode(); 1209 } 1210 1211 @Override //Object 1212 public boolean equals(Object other) { 1213 if (other == null || !(other instanceof AbstractFileSystem)) { 1214 return false; 1215 } 1216 return myUri.equals(((AbstractFileSystem) other).myUri); 1217 } 1218}