public class ReplicaInPipeline
extends org.apache.hadoop.hdfs.server.datanode.ReplicaInfo
implements ReplicaInPipelineInterface

Nested classes/interfaces inherited from class org.apache.hadoop.hdfs.server.datanode.ReplicaInfo:
ReplicaInfo.ReplicaDirInfo
Constructor and Description |
---|
ReplicaInPipeline(long blockId, long genStamp, FsVolumeSpi vol, File dir, long bytesToReserve) Constructor for a zero-length replica |
ReplicaInPipeline(ReplicaInPipeline from) Copy constructor. |
Modifier and Type | Method and Description |
---|---|
boolean | attemptToSetWriter(Thread prevWriter, Thread newWriter) Attempt to set the writer to a new value. |
OutputStream | createRestartMetaStream() Create an output stream to write restart metadata in case of the datanode shutting down for quick restart. |
ReplicaOutputStreams | createStreams(boolean isCreate, org.apache.hadoop.util.DataChecksum requestedChecksum) Create output streams for writing to this replica, one for the block file and one for the CRC file. |
boolean | equals(Object o) |
long | getBytesAcked() Get the number of bytes acked. |
long | getBytesOnDisk() Get the number of bytes that have been written to disk. |
long | getBytesReserved() Number of bytes reserved for this replica on disk. |
ChunkChecksum | getLastChecksumAndDataLen() Get the last chunk checksum and the length of the block corresponding to that checksum. |
long | getOriginalBytesReserved() Number of bytes originally reserved for this replica. |
HdfsServerConstants.ReplicaState | getState() Get the replica state. |
long | getVisibleLength() Get the number of bytes that are visible to readers. |
int | hashCode() |
void | interruptThread() |
void | releaseAllBytesReserved() Release any disk space reserved for this replica. |
void | setBytesAcked(long bytesAcked) Set the number of bytes that have been acked. |
void | setLastChecksumAndDataLen(long dataLength, byte[] lastChecksum) Store the checksum for the last chunk along with the data length. |
void | stopWriter(long xceiverStopTimeout) Interrupt the writing thread and wait until it dies. |
String | toString() |
Methods inherited from class org.apache.hadoop.hdfs.server.datanode.ReplicaInfo:
breakHardLinksIfNeeded, getBlockFile, getMetaFile, getNext, getStorageUuid, getVolume, isOnTransientStorage, parseBaseDir, setDir, setNext

Methods inherited from class org.apache.hadoop.hdfs.protocol.Block:
appendStringTo, compareTo, filename2id, getBlockId, getBlockId, getBlockName, getGenerationStamp, getGenerationStamp, getNumBytes, isBlockFilename, isMetaFilename, matchingIdAndGenStamp, metaToBlockFile, readFields, readId, set, setBlockId, setGenerationStamp, setNumBytes, toString, write, writeId

Methods inherited from class java.lang.Object:
clone, finalize, getClass, notify, notifyAll, wait, wait, wait

Methods inherited from interface ReplicaInPipelineInterface:
setNumBytes
public ReplicaInPipeline(long blockId, long genStamp, FsVolumeSpi vol, File dir, long bytesToReserve)

Constructor for a zero-length replica.

Parameters:
blockId - block id
genStamp - replica generation stamp
vol - volume where replica is located
dir - directory path where block and meta files are located
bytesToReserve - disk space to reserve for this replica, based on the estimated maximum block length

public ReplicaInPipeline(ReplicaInPipeline from)

Copy constructor.

Parameters:
from - where to copy from
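A minimal sketch of a call site for this constructor, assuming a Hadoop 2.x hdfs jar on the classpath. ReplicaFactorySketch, rbwDir, and estimatedMaxBlockLen are illustrative names; a live FsVolumeSpi must come from the DataNode's dataset and cannot be fabricated here.

```java
import java.io.File;

import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

// Illustrative helper, not part of Hadoop. The caller supplies a live
// FsVolumeSpi and the directory on that volume where the block and meta
// files should live.
class ReplicaFactorySketch {
  static ReplicaInPipeline newZeroLengthReplica(long blockId, long genStamp,
      FsVolumeSpi vol, File rbwDir, long estimatedMaxBlockLen) {
    // Reserving the estimated maximum block length up front keeps the
    // volume from over-committing disk space while the write is open.
    return new ReplicaInPipeline(blockId, genStamp, vol, rbwDir,
        estimatedMaxBlockLen);
  }
}
```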
public long getVisibleLength()

Description copied from interface: org.apache.hadoop.hdfs.server.datanode.Replica
Get the number of bytes that are visible to readers.

Specified by:
getVisibleLength in interface org.apache.hadoop.hdfs.server.datanode.Replica
public HdfsServerConstants.ReplicaState getState()

Description copied from interface: org.apache.hadoop.hdfs.server.datanode.Replica
Get the replica state.

Specified by:
getState in interface org.apache.hadoop.hdfs.server.datanode.Replica
public long getBytesAcked()

Description copied from interface: ReplicaInPipelineInterface
Get the number of bytes acked.

Specified by:
getBytesAcked in interface ReplicaInPipelineInterface
public void setBytesAcked(long bytesAcked)

Description copied from interface: ReplicaInPipelineInterface
Set the number of bytes that have been acked.

Specified by:
setBytesAcked in interface ReplicaInPipelineInterface

Parameters:
bytesAcked - number of bytes acked
public long getBytesOnDisk()

Description copied from interface: org.apache.hadoop.hdfs.server.datanode.Replica
Get the number of bytes that have been written to disk.

Specified by:
getBytesOnDisk in interface org.apache.hadoop.hdfs.server.datanode.Replica
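The relationship between the two counters is easiest to see in a standalone sketch, not taken from the Hadoop source: bytes reach the disk first and are acknowledged by the downstream pipeline afterwards, so the acked count can never pass the on-disk count.

```java
// Standalone sketch of the byte-accounting pattern (illustrative, not the
// Hadoop source). Bytes reach the disk before they can be acknowledged by
// the downstream datanodes, so bytesAcked never exceeds bytesOnDisk.
class PipelineByteCounters {
  private long bytesOnDisk; // advanced by the writer thread after a flush
  private long bytesAcked;  // advanced as pipeline acks arrive

  synchronized void onPacketFlushed(long newLength) {
    bytesOnDisk = Math.max(bytesOnDisk, newLength);
  }

  synchronized void onPacketAcked(long ackedLength) {
    if (ackedLength > bytesOnDisk) {
      // An ack for bytes that never reached the disk indicates a bug.
      throw new IllegalStateException("acked beyond bytes on disk");
    }
    bytesAcked = Math.max(bytesAcked, ackedLength);
  }

  synchronized long getBytesOnDisk() { return bytesOnDisk; }
  synchronized long getBytesAcked() { return bytesAcked; }
}
```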
public long getBytesReserved()

Description copied from class: org.apache.hadoop.hdfs.server.datanode.ReplicaInfo
Number of bytes reserved for this replica on disk.

Overrides:
getBytesReserved in class org.apache.hadoop.hdfs.server.datanode.ReplicaInfo
public long getOriginalBytesReserved()

Description copied from class: org.apache.hadoop.hdfs.server.datanode.ReplicaInfo
Number of bytes originally reserved for this replica.

Overrides:
getOriginalBytesReserved in class org.apache.hadoop.hdfs.server.datanode.ReplicaInfo
public void releaseAllBytesReserved()

Description copied from interface: ReplicaInPipelineInterface
Release any disk space reserved for this replica.

Specified by:
releaseAllBytesReserved in interface ReplicaInPipelineInterface
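A standalone sketch of the reservation scheme these three methods expose (illustrative, not the Hadoop implementation): space is reserved against the volume when the replica is created, and whatever is still reserved is handed back when the write finalizes or aborts.

```java
import java.util.concurrent.atomic.AtomicLong;

// Standalone sketch of per-replica space reservation (illustrative only).
class SpaceReservationSketch {
  private final AtomicLong volumeReserved;   // shared counter for the volume
  private final long originalBytesReserved;  // fixed when the replica is created
  private long bytesReserved;                // remaining reservation

  SpaceReservationSketch(AtomicLong volumeReserved, long estimatedMaxBlockLen) {
    this.volumeReserved = volumeReserved;
    this.originalBytesReserved = estimatedMaxBlockLen;
    this.bytesReserved = estimatedMaxBlockLen;
    volumeReserved.addAndGet(estimatedMaxBlockLen);
  }

  synchronized long getBytesReserved() { return bytesReserved; }
  long getOriginalBytesReserved() { return originalBytesReserved; }

  // Finalizing or aborting the write returns whatever is still reserved
  // so the volume can hand the space to other writers.
  synchronized void releaseAllBytesReserved() {
    volumeReserved.addAndGet(-bytesReserved);
    bytesReserved = 0;
  }
}
```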
public void setLastChecksumAndDataLen(long dataLength, byte[] lastChecksum)

Description copied from interface: ReplicaInPipelineInterface
Store the checksum for the last chunk along with the data length.

Specified by:
setLastChecksumAndDataLen in interface ReplicaInPipelineInterface

Parameters:
dataLength - number of bytes on disk
lastChecksum - checksum bytes for the last chunk
public ChunkChecksum getLastChecksumAndDataLen()

Description copied from interface: ReplicaInPipelineInterface
Get the last chunk checksum and the length of the block corresponding to that checksum.

Specified by:
getLastChecksumAndDataLen in interface ReplicaInPipelineInterface
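Why the checksum and the length travel together is easiest to see in a standalone sketch (illustrative, not the Hadoop source): when a packet ends mid-chunk, the CRC of the partial chunk is only meaningful at the exact data length it was computed for, so readers need both values from one consistent snapshot.

```java
// Standalone sketch (illustrative, not Hadoop source): the checksum of a
// partial trailing chunk is only meaningful together with the exact data
// length it was computed at, so the pair is updated and read atomically.
class LastChunkChecksumSketch {
  static final class ChunkChecksumSnapshot {
    final long dataLength;
    final byte[] checksum;
    ChunkChecksumSnapshot(long dataLength, byte[] checksum) {
      this.dataLength = dataLength;
      this.checksum = checksum;
    }
  }

  private ChunkChecksumSnapshot last;

  synchronized void setLastChecksumAndDataLen(long dataLength,
      byte[] checksum) {
    last = new ChunkChecksumSnapshot(dataLength,
        checksum == null ? null : checksum.clone());
  }

  // A reader gets a consistent (length, checksum) pair even while the
  // writer keeps appending and publishing newer snapshots.
  synchronized ChunkChecksumSnapshot getLastChecksumAndDataLen() {
    return last;
  }
}
```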
public void interruptThread()
public boolean equals(Object o)

Overrides:
equals in class org.apache.hadoop.hdfs.protocol.Block
public boolean attemptToSetWriter(Thread prevWriter, Thread newWriter)

Attempt to set the writer to a new value.
public void stopWriter(long xceiverStopTimeout) throws IOException

Interrupt the writing thread and wait until it dies.

Throws:
IOException - if the waiting is interrupted
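attemptToSetWriter and stopWriter together implement a writer-handoff pattern. A standalone sketch with AtomicReference (illustrative, not the Hadoop source) shows the idea: ownership changes only via compare-and-set, and a stale writer is interrupted and joined with a timeout.

```java
import java.io.IOException;
import java.util.concurrent.atomic.AtomicReference;

// Standalone sketch of the writer-ownership pattern (illustrative only).
class WriterOwnershipSketch {
  private final AtomicReference<Thread> writer = new AtomicReference<>();

  // Succeeds only if prevWriter still owns the replica, so two
  // recovering threads cannot both claim it.
  boolean attemptToSetWriter(Thread prevWriter, Thread newWriter) {
    return writer.compareAndSet(prevWriter, newWriter);
  }

  // Interrupt the current writer (if it is some other live thread) and
  // wait up to the timeout for it to die.
  void stopWriter(long timeoutMillis) throws IOException {
    Thread t = writer.get();
    if (t != null && t != Thread.currentThread() && t.isAlive()) {
      t.interrupt();
      try {
        t.join(timeoutMillis);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IOException("waiting for the writer was interrupted", e);
      }
      if (t.isAlive()) {
        throw new IOException("writer did not die within the timeout");
      }
    }
  }
}
```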
public int hashCode()

Overrides:
hashCode in class org.apache.hadoop.hdfs.protocol.Block
public ReplicaOutputStreams createStreams(boolean isCreate, org.apache.hadoop.util.DataChecksum requestedChecksum) throws IOException

Description copied from interface: ReplicaInPipelineInterface
Create output streams for writing to this replica, one for the block file and one for the CRC file.

Specified by:
createStreams in interface ReplicaInPipelineInterface

Parameters:
isCreate - if it is for creation
requestedChecksum - the checksum the writer would prefer to use

Throws:
IOException - if any error occurs
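A standalone sketch of the two-stream layout behind ReplicaOutputStreams (illustrative; java.util.zip.CRC32 stands in for org.apache.hadoop.util.DataChecksum, and the chunk size and file names are assumptions): block bytes go to the block file while one CRC per chunk goes to the companion meta file.

```java
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.zip.CRC32;

// Standalone sketch (illustrative): write a block file plus a companion
// checksum file, one CRC per fixed-size chunk, as the dual streams do.
class DualStreamSketch {
  static final int BYTES_PER_CHECKSUM = 512; // assumed chunk size

  static void writeChunked(File blockFile, File metaFile, byte[] data)
      throws IOException {
    try (FileOutputStream blockOut = new FileOutputStream(blockFile);
         DataOutputStream metaOut =
             new DataOutputStream(new FileOutputStream(metaFile))) {
      for (int off = 0; off < data.length; off += BYTES_PER_CHECKSUM) {
        int len = Math.min(BYTES_PER_CHECKSUM, data.length - off);
        blockOut.write(data, off, len);         // chunk to the block stream
        CRC32 crc = new CRC32();
        crc.update(data, off, len);
        metaOut.writeInt((int) crc.getValue()); // CRC to the meta stream
      }
    }
  }
}
```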
public OutputStream createRestartMetaStream() throws IOException

Description copied from interface: ReplicaInPipelineInterface
Create an output stream to write restart metadata in case of the datanode shutting down for quick restart.

Specified by:
createRestartMetaStream in interface ReplicaInPipelineInterface

Throws:
IOException - if any error occurs
public String toString()

Overrides:
toString in class org.apache.hadoop.hdfs.server.datanode.ReplicaInfo