/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.datanode;

import java.io.File;

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;

/**
 * This class represents a replica that is under block recovery.
 * It has a recovery id, which is equal to the generation stamp
 * that the replica will be bumped to after recovery.
 * The recovery id is used to handle multiple concurrent block recoveries:
 * a recovery with a higher recovery id preempts recoveries with a lower id.
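 *
 * <p>A minimal usage sketch (illustrative only: {@code replica} stands for a
 * replica in the FINALIZED, RBW, or RWR state obtained elsewhere, and the
 * recovery id values are made up):
 * <pre> {@code
 * ReplicaUnderRecovery rur = new ReplicaUnderRecovery(replica, 1001L);
 * // a later recovery attempt with a higher recovery id preempts this one
 * rur.setRecoveryID(1002L);
 * // a stale attempt with a lower or equal id is rejected
 * // rur.setRecoveryID(1001L);  // would throw IllegalArgumentException
 * }</pre>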
 */
public class ReplicaUnderRecovery extends ReplicaInfo {
  private ReplicaInfo original; // the original replica that needs to be recovered
  private long recoveryId; // recovery id; it is also the generation stamp
                           // that the replica will be bumped to after recovery

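  /**
   * Constructor.
   * @param replica the original replica to be recovered; it must be in the
   *          FINALIZED, RBW, or RWR state
   * @param recoveryId the recovery id, i.e. the generation stamp that the
   *          replica will be bumped to after recovery
   * @throws IllegalArgumentException if the replica is not in a recoverable
   *           state
   */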
  public ReplicaUnderRecovery(ReplicaInfo replica, long recoveryId) {
    super(replica, replica.getVolume(), replica.getDir());
    if (replica.getState() != ReplicaState.FINALIZED &&
        replica.getState() != ReplicaState.RBW &&
        replica.getState() != ReplicaState.RWR) {
      throw new IllegalArgumentException("Cannot recover replica: " + replica);
    }
    this.original = replica;
    this.recoveryId = recoveryId;
  }

  /**
   * Copy constructor.
   * @param from where to copy from
   */
  public ReplicaUnderRecovery(ReplicaUnderRecovery from) {
    super(from);
    this.original = from.getOriginalReplica();
    this.recoveryId = from.getRecoveryID();
  }

  /**
   * Get the recovery id.
   * @return the generation stamp that the replica will be bumped to
   */
  public long getRecoveryID() {
    return recoveryId;
  }

  /**
   * Set the recovery id. The new id must be greater than the current one;
   * this is how a recovery with a higher recovery id preempts one with a
   * lower id.
   * @param recoveryId the new recovery id
   * @throws IllegalArgumentException if the new recovery id is not greater
   *           than the current one
   */
  public void setRecoveryID(long recoveryId) {
    if (recoveryId > this.recoveryId) {
      this.recoveryId = recoveryId;
    } else {
      throw new IllegalArgumentException("The new recovery id: " + recoveryId
          + " must be greater than the current one: " + this.recoveryId);
    }
  }

  /**
   * Get the original replica that is under recovery.
   * @return the original replica under recovery
   */
  public ReplicaInfo getOriginalReplica() {
    return original;
  }

  @Override //ReplicaInfo
  public ReplicaState getState() {
    return ReplicaState.RUR;
  }

  @Override
  public long getVisibleLength() {
    return original.getVisibleLength();
  }

  @Override
  public long getBytesOnDisk() {
    return original.getBytesOnDisk();
  }

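  // Note: each setter below propagates the mutation to the wrapped original
  // replica as well, so this RUR view and the original stay consistent.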
  @Override  //org.apache.hadoop.hdfs.protocol.Block
  public void setBlockId(long blockId) {
    super.setBlockId(blockId);
    original.setBlockId(blockId);
  }

  @Override //org.apache.hadoop.hdfs.protocol.Block
  public void setGenerationStamp(long gs) {
    super.setGenerationStamp(gs);
    original.setGenerationStamp(gs);
  }

  @Override //org.apache.hadoop.hdfs.protocol.Block
  public void setNumBytes(long numBytes) {
    super.setNumBytes(numBytes);
    original.setNumBytes(numBytes);
  }

  @Override //ReplicaInfo
  public void setDir(File dir) {
    super.setDir(dir);
    original.setDir(dir);
  }

  @Override //ReplicaInfo
  void setVolume(FsVolumeSpi vol) {
    super.setVolume(vol);
    original.setVolume(vol);
  }

  @Override  // Object
  public boolean equals(Object o) {
    return super.equals(o);
  }

  @Override  // Object
  public int hashCode() {
    return super.hashCode();
  }

  @Override
  public String toString() {
    return super.toString()
        + "\n  recoveryId=" + recoveryId
        + "\n  original=" + original;
  }

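  /**
   * Create a {@link ReplicaRecoveryInfo} snapshot of the original replica,
   * carrying its block id, number of bytes on disk, generation stamp,
   * and state.
   * @return the recovery information derived from the original replica
   */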
  public ReplicaRecoveryInfo createInfo() {
    return new ReplicaRecoveryInfo(original.getBlockId(),
        original.getBytesOnDisk(), original.getGenerationStamp(),
        original.getState());
  }
}