@InterfaceAudience.Public @InterfaceStability.Evolving public class HdfsAdmin extends Object
Constructor and Description |
---|
HdfsAdmin(URI uri,
org.apache.hadoop.conf.Configuration conf)
Create a new HdfsAdmin client.
|
Modifier and Type | Method and Description |
---|---|
long |
addCacheDirective(CacheDirectiveInfo info,
EnumSet<CacheFlag> flags)
Add a new CacheDirectiveInfo.
|
void |
addCachePool(CachePoolInfo info)
Add a cache pool.
|
void |
allowSnapshot(org.apache.hadoop.fs.Path path)
Allow snapshot on a directory.
|
void |
clearQuota(org.apache.hadoop.fs.Path src)
Clear the namespace quota (count of files, directories and sym links) for a
directory.
|
void |
clearQuotaByStorageType(org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.StorageType type)
Clear the space quota by storage type for a directory.
|
void |
clearSpaceQuota(org.apache.hadoop.fs.Path src)
Clear the storage space quota (size of files) for a directory.
|
void |
createEncryptionZone(org.apache.hadoop.fs.Path path,
String keyName)
Deprecated.
|
void |
createEncryptionZone(org.apache.hadoop.fs.Path path,
String keyName,
EnumSet<CreateEncryptionZoneFlag> flags)
Create an encryption zone rooted at an empty existing directory, using the
specified encryption key.
|
void |
disallowSnapshot(org.apache.hadoop.fs.Path path)
Disallow snapshot on a directory.
|
Collection<? extends org.apache.hadoop.fs.BlockStoragePolicySpi> |
getAllStoragePolicies()
Retrieve all the storage policies supported by HDFS file system.
|
EncryptionZone |
getEncryptionZoneForPath(org.apache.hadoop.fs.Path path)
Get the path of the encryption zone for a given file or directory.
|
org.apache.hadoop.fs.FileEncryptionInfo |
getFileEncryptionInfo(org.apache.hadoop.fs.Path path)
Returns the FileEncryptionInfo on the HdfsFileStatus for the given path.
|
DFSInotifyEventInputStream |
getInotifyEventStream()
Exposes a stream of namesystem events.
|
DFSInotifyEventInputStream |
getInotifyEventStream(long lastReadTxid)
A version of getInotifyEventStream() meant for advanced
users who are aware of HDFS edits up to lastReadTxid (e.g. because they
have access to an FSImage inclusive of lastReadTxid).
|
org.apache.hadoop.crypto.key.KeyProvider |
getKeyProvider()
Get KeyProvider if present.
|
org.apache.hadoop.fs.BlockStoragePolicySpi |
getStoragePolicy(org.apache.hadoop.fs.Path src)
Query the effective storage policy ID for the given file or directory.
|
org.apache.hadoop.fs.RemoteIterator<CacheDirectiveEntry> |
listCacheDirectives(CacheDirectiveInfo filter)
List cache directives.
|
org.apache.hadoop.fs.RemoteIterator<CachePoolEntry> |
listCachePools()
List all cache pools.
|
org.apache.hadoop.fs.RemoteIterator<EncryptionZone> |
listEncryptionZones()
Returns a RemoteIterator which can be used to list the encryption zones
in HDFS.
|
org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.hdfs.protocol.OpenFileEntry> |
listOpenFiles()
Returns a RemoteIterator which can be used to list all open files
currently managed by the NameNode.
|
void |
modifyCacheDirective(CacheDirectiveInfo info,
EnumSet<CacheFlag> flags)
Modify a CacheDirective.
|
void |
modifyCachePool(CachePoolInfo info)
Modify an existing cache pool.
|
void |
provisionEncryptionZoneTrash(org.apache.hadoop.fs.Path path)
Provision a trash directory for a given encryption zone.
|
void |
removeCacheDirective(long id)
Remove a CacheDirective.
|
void |
removeCachePool(String poolName)
Remove a cache pool.
|
void |
setQuota(org.apache.hadoop.fs.Path src,
long quota)
Set the namespace quota (count of files, directories, and sym links) for a
directory.
|
void |
setQuotaByStorageType(org.apache.hadoop.fs.Path src,
org.apache.hadoop.fs.StorageType type,
long quota)
Set the quota by storage type for a directory.
|
void |
setSpaceQuota(org.apache.hadoop.fs.Path src,
long spaceQuota)
Set the storage space quota (size of files) for a directory.
|
void |
setStoragePolicy(org.apache.hadoop.fs.Path src,
String policyName)
Set the source path to the specified storage policy.
|
void |
unsetStoragePolicy(org.apache.hadoop.fs.Path src)
Unset the storage policy set for a given file or directory.
|
public HdfsAdmin(URI uri, org.apache.hadoop.conf.Configuration conf) throws IOException
uri - the unique URI of the HDFS file system to administer
conf - configuration
IOException - in the event the file system could not be created

public void setQuota(org.apache.hadoop.fs.Path src, long quota) throws IOException
src - the path to set the quota for
quota - the value to set for the quota
IOException - in the event of error

public void clearQuota(org.apache.hadoop.fs.Path src) throws IOException
src - the path to clear the quota of
IOException - in the event of error

public void setSpaceQuota(org.apache.hadoop.fs.Path src, long spaceQuota) throws IOException
src - the path to set the space quota of
spaceQuota - the value to set for the space quota
IOException - in the event of error

public void clearSpaceQuota(org.apache.hadoop.fs.Path src) throws IOException
src - the path to clear the space quota of
IOException - in the event of error

public void setQuotaByStorageType(org.apache.hadoop.fs.Path src, org.apache.hadoop.fs.StorageType type, long quota) throws IOException
src - the target directory to set the quota by storage type
type - the storage type to set for quota by storage type
quota - the value to set for quota by storage type
IOException - in the event of error

public void clearQuotaByStorageType(org.apache.hadoop.fs.Path src, org.apache.hadoop.fs.StorageType type) throws IOException
src - the target directory to clear the quota by storage type
type - the storage type to clear for quota by storage type
IOException - in the event of error

public void allowSnapshot(org.apache.hadoop.fs.Path path) throws IOException
path - The path of the directory where snapshots will be taken.
IOException

public void disallowSnapshot(org.apache.hadoop.fs.Path path) throws IOException
path - The path of the snapshottable directory.
IOException

public long addCacheDirective(CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException
info - Information about a directive to add.
flags - CacheFlags to use for this operation.
IOException - if the directive could not be added

public void modifyCacheDirective(CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException
info - Information about the directive to modify. You must set the ID
to indicate which CacheDirective you want to modify.
flags - CacheFlags to use for this operation.
IOException - if the directive could not be modified

public void removeCacheDirective(long id) throws IOException
id - identifier of the CacheDirectiveInfo to remove
IOException - if the directive could not be removed

public org.apache.hadoop.fs.RemoteIterator<CacheDirectiveEntry> listCacheDirectives(CacheDirectiveInfo filter) throws IOException
filter - Filter parameters to use when listing the directives, null to
list all directives visible to us.
IOException

public void addCachePool(CachePoolInfo info) throws IOException
info - The request to add a cache pool.
IOException - If the request could not be completed.

public void modifyCachePool(CachePoolInfo info) throws IOException
info - The request to modify a cache pool.
IOException - If the request could not be completed.

public void removeCachePool(String poolName) throws IOException
poolName - Name of the cache pool to remove.
IOException - if the cache pool did not exist, or could not be removed.

public org.apache.hadoop.fs.RemoteIterator<CachePoolEntry> listCachePools() throws IOException
IOException - If there was an error listing cache pools.

public org.apache.hadoop.crypto.key.KeyProvider getKeyProvider() throws IOException
IOException - on RPC exception to the NN.

@Deprecated public void createEncryptionZone(org.apache.hadoop.fs.Path path, String keyName) throws IOException, org.apache.hadoop.security.AccessControlException, FileNotFoundException
path - The path of the root of the encryption zone. Must refer to
an empty, existing directory.
keyName - Name of key available at the KeyProvider.
IOException - if there was a general IO exception
org.apache.hadoop.security.AccessControlException - if the caller does not have access to path
FileNotFoundException - if the path does not exist

public void createEncryptionZone(org.apache.hadoop.fs.Path path, String keyName, EnumSet<CreateEncryptionZoneFlag> flags) throws IOException, org.apache.hadoop.security.AccessControlException, FileNotFoundException, org.apache.hadoop.HadoopIllegalArgumentException
CreateEncryptionZoneFlag flags.
path - The path of the root of the encryption zone. Must refer to
an empty, existing directory.
keyName - Name of key available at the KeyProvider.
flags - flags for this operation.
IOException - if there was a general IO exception
org.apache.hadoop.security.AccessControlException - if the caller does not have access to path
FileNotFoundException - if the path does not exist
org.apache.hadoop.HadoopIllegalArgumentException - if the flags are invalid

public void provisionEncryptionZoneTrash(org.apache.hadoop.fs.Path path) throws IOException
path - the root of the encryption zone
IOException - if the trash directory can not be created.

public EncryptionZone getEncryptionZoneForPath(org.apache.hadoop.fs.Path path) throws IOException, org.apache.hadoop.security.AccessControlException
path - The path to get the ez for.
IOException - if there was a general IO exception
org.apache.hadoop.security.AccessControlException - if the caller does not have access to path

public org.apache.hadoop.fs.RemoteIterator<EncryptionZone> listEncryptionZones() throws IOException
IOException
public org.apache.hadoop.fs.FileEncryptionInfo getFileEncryptionInfo(org.apache.hadoop.fs.Path path) throws IOException
FileNotFoundException - if the path does not exist.
org.apache.hadoop.security.AccessControlException - if no execute permission on parent path.
IOException

public DFSInotifyEventInputStream getInotifyEventStream() throws IOException
DFSInotifyEventInputStream
for information on stream usage.
See Event
for information on the available events.
Inotify users may want to tune the following HDFS parameters to
ensure that enough extra HDFS edits are saved to support inotify clients
that fall behind the current state of the namespace while reading events.
The default parameter values should generally be reasonable. If edits are
deleted before their corresponding events can be read, clients will see a
MissingEventsException
on
DFSInotifyEventInputStream
method calls.
It should generally be sufficient to tune these parameters:
dfs.namenode.num.extra.edits.retained
dfs.namenode.max.extra.edits.segments.retained
Parameters that affect the number of created segments and the number of
edits that are considered necessary, i.e. do not count towards the
dfs.namenode.num.extra.edits.retained quota):
dfs.namenode.checkpoint.period
dfs.namenode.checkpoint.txns
dfs.namenode.num.checkpoints.retained
dfs.ha.log-roll.period
It is recommended that local journaling be configured
(dfs.namenode.edits.dir) for inotify (in addition to a shared journal)
so that edit transfers from the shared journal can be avoided.
IOException - If there was an error obtaining the stream.

public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid) throws IOException
A version of getInotifyEventStream() meant for advanced
users who are aware of HDFS edits up to lastReadTxid (e.g. because they
have access to an FSImage inclusive of lastReadTxid) and only want to read
events after this point.
IOException

public void setStoragePolicy(org.apache.hadoop.fs.Path src, String policyName) throws IOException
src - The source path referring to either a directory or a file.
policyName - The name of the storage policy.
IOException

public void unsetStoragePolicy(org.apache.hadoop.fs.Path src) throws IOException
public void unsetStoragePolicy(org.apache.hadoop.fs.Path src) throws IOException
src
- file or directory path.IOException
public org.apache.hadoop.fs.BlockStoragePolicySpi getStoragePolicy(org.apache.hadoop.fs.Path src) throws IOException
src - file or directory path.
IOException

public Collection<? extends org.apache.hadoop.fs.BlockStoragePolicySpi> getAllStoragePolicies() throws IOException
IOException
public org.apache.hadoop.fs.RemoteIterator<org.apache.hadoop.hdfs.protocol.OpenFileEntry> listOpenFiles() throws IOException
IOException
Copyright © 2018 Apache Software Foundation. All Rights Reserved.