// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: fsimage.proto

package org.apache.hadoop.hdfs.server.namenode;

public final class FsImageProto {
  private FsImageProto() {}
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
  public interface FileSummaryOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required uint32 ondiskVersion = 1;
    /**
     * <code>required uint32 ondiskVersion = 1;</code>
     *
     * <pre>
     * The version of the above EBNF grammars.
     * </pre>
     */
    boolean hasOndiskVersion();
    /**
     * <code>required uint32 ondiskVersion = 1;</code>
     *
     * <pre>
     * The version of the above EBNF grammars.
     * </pre>
     */
    int getOndiskVersion();

    // required uint32 layoutVersion = 2;
    /**
     * <code>required uint32 layoutVersion = 2;</code>
     *
     * <pre>
     * layoutVersion describes which features are available in the
     * FSImage.
     * </pre>
     */
    boolean hasLayoutVersion();
    /**
     * <code>required uint32 layoutVersion = 2;</code>
     *
     * <pre>
     * layoutVersion describes which features are available in the
     * FSImage.
     * </pre>
     */
    int getLayoutVersion();

    // optional string codec = 3;
    /**
     * <code>optional string codec = 3;</code>
     */
    boolean hasCodec();
    /**
     * <code>optional string codec = 3;</code>
     */
    java.lang.String getCodec();
    /**
     * <code>optional string codec = 3;</code>
     */
    com.google.protobuf.ByteString
        getCodecBytes();

    // repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;
    /**
     * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section>
        getSectionsList();
    /**
     * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
     */
    org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section getSections(int index);
    /**
     * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
     */
    int getSectionsCount();
    /**
     * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder>
        getSectionsOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
     */
    org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder getSectionsOrBuilder(
        int index);
  }
  /**
   * Protobuf type {@code hadoop.hdfs.fsimage.FileSummary}
   */
  public static final class FileSummary extends
      com.google.protobuf.GeneratedMessage
      implements FileSummaryOrBuilder {
    // Use FileSummary.newBuilder() to construct.
    private FileSummary(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private FileSummary(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final FileSummary defaultInstance;
    public static FileSummary getDefaultInstance() {
      return defaultInstance;
    }

    public FileSummary getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private FileSummary(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 8: {
              bitField0_ |= 0x00000001;
              ondiskVersion_ = input.readUInt32();
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              layoutVersion_ = input.readUInt32();
              break;
            }
            case 26: {
              bitField0_ |= 0x00000004;
              codec_ = input.readBytes();
              break;
            }
            case 34: {
              if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
                sections_ = new java.util.ArrayList<org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section>();
                mutable_bitField0_ |= 0x00000008;
              }
              sections_.add(input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.PARSER, extensionRegistry));
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
          sections_ = java.util.Collections.unmodifiableList(sections_);
        }
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Builder.class);
    }

    public static com.google.protobuf.Parser<FileSummary> PARSER =
        new com.google.protobuf.AbstractParser<FileSummary>() {
      public FileSummary parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new FileSummary(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<FileSummary> getParserForType() {
      return PARSER;
    }

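    /*
     * Illustrative sketch (hand-added; not part of the protoc output): PARSER
     * is the reusable entry point for decoding a FileSummary, e.g. from a
     * stream. The "in" variable below is a hypothetical placeholder.
     *
     *   java.io.InputStream in = ...;                       // hypothetical source
     *   FsImageProto.FileSummary summary =
     *       FsImageProto.FileSummary.PARSER.parseFrom(in);  // decodes one message
     */
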
    public interface SectionOrBuilder
        extends com.google.protobuf.MessageOrBuilder {

      // optional string name = 1;
      /**
       * <code>optional string name = 1;</code>
       */
      boolean hasName();
      /**
       * <code>optional string name = 1;</code>
       */
      java.lang.String getName();
      /**
       * <code>optional string name = 1;</code>
       */
      com.google.protobuf.ByteString
          getNameBytes();

      // optional uint64 length = 2;
      /**
       * <code>optional uint64 length = 2;</code>
       */
      boolean hasLength();
      /**
       * <code>optional uint64 length = 2;</code>
       */
      long getLength();

      // optional uint64 offset = 3;
      /**
       * <code>optional uint64 offset = 3;</code>
       */
      boolean hasOffset();
      /**
       * <code>optional uint64 offset = 3;</code>
       */
      long getOffset();
    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.FileSummary.Section}
     *
     * <pre>
     * index for each section
     * </pre>
     */
    public static final class Section extends
        com.google.protobuf.GeneratedMessage
        implements SectionOrBuilder {
      // Use Section.newBuilder() to construct.
      private Section(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
        super(builder);
        this.unknownFields = builder.getUnknownFields();
      }
      private Section(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

      private static final Section defaultInstance;
      public static Section getDefaultInstance() {
        return defaultInstance;
      }

      public Section getDefaultInstanceForType() {
        return defaultInstance;
      }

      private final com.google.protobuf.UnknownFieldSet unknownFields;
      @java.lang.Override
      public final com.google.protobuf.UnknownFieldSet
          getUnknownFields() {
        return this.unknownFields;
      }
      private Section(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        initFields();
        int mutable_bitField0_ = 0;
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
            com.google.protobuf.UnknownFieldSet.newBuilder();
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!parseUnknownField(input, unknownFields,
                                       extensionRegistry, tag)) {
                  done = true;
                }
                break;
              }
              case 10: {
                bitField0_ |= 0x00000001;
                name_ = input.readBytes();
                break;
              }
              case 16: {
                bitField0_ |= 0x00000002;
                length_ = input.readUInt64();
                break;
              }
              case 24: {
                bitField0_ |= 0x00000004;
                offset_ = input.readUInt64();
                break;
              }
            }
          }
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(this);
        } catch (java.io.IOException e) {
          throw new com.google.protobuf.InvalidProtocolBufferException(
              e.getMessage()).setUnfinishedMessage(this);
        } finally {
          this.unknownFields = unknownFields.build();
          makeExtensionsImmutable();
        }
      }
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_Section_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_Section_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder.class);
      }

      public static com.google.protobuf.Parser<Section> PARSER =
          new com.google.protobuf.AbstractParser<Section>() {
        public Section parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          return new Section(input, extensionRegistry);
        }
      };

      @java.lang.Override
      public com.google.protobuf.Parser<Section> getParserForType() {
        return PARSER;
      }

      private int bitField0_;
      // optional string name = 1;
      public static final int NAME_FIELD_NUMBER = 1;
      private java.lang.Object name_;
      /**
       * <code>optional string name = 1;</code>
       */
      public boolean hasName() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>optional string name = 1;</code>
       */
      public java.lang.String getName() {
        java.lang.Object ref = name_;
        if (ref instanceof java.lang.String) {
          return (java.lang.String) ref;
        } else {
          com.google.protobuf.ByteString bs =
              (com.google.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            name_ = s;
          }
          return s;
        }
      }
      /**
       * <code>optional string name = 1;</code>
       */
      public com.google.protobuf.ByteString
          getNameBytes() {
        java.lang.Object ref = name_;
        if (ref instanceof java.lang.String) {
          com.google.protobuf.ByteString b =
              com.google.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          name_ = b;
          return b;
        } else {
          return (com.google.protobuf.ByteString) ref;
        }
      }

      // optional uint64 length = 2;
      public static final int LENGTH_FIELD_NUMBER = 2;
      private long length_;
      /**
       * <code>optional uint64 length = 2;</code>
       */
      public boolean hasLength() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>optional uint64 length = 2;</code>
       */
      public long getLength() {
        return length_;
      }

      // optional uint64 offset = 3;
      public static final int OFFSET_FIELD_NUMBER = 3;
      private long offset_;
      /**
       * <code>optional uint64 offset = 3;</code>
       */
      public boolean hasOffset() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      /**
       * <code>optional uint64 offset = 3;</code>
       */
      public long getOffset() {
        return offset_;
      }

      private void initFields() {
        name_ = "";
        length_ = 0L;
        offset_ = 0L;
      }
      private byte memoizedIsInitialized = -1;
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized != -1) return isInitialized == 1;

        memoizedIsInitialized = 1;
        return true;
      }

      public void writeTo(com.google.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        getSerializedSize();
        if (((bitField0_ & 0x00000001) == 0x00000001)) {
          output.writeBytes(1, getNameBytes());
        }
        if (((bitField0_ & 0x00000002) == 0x00000002)) {
          output.writeUInt64(2, length_);
        }
        if (((bitField0_ & 0x00000004) == 0x00000004)) {
          output.writeUInt64(3, offset_);
        }
        getUnknownFields().writeTo(output);
      }

      private int memoizedSerializedSize = -1;
      public int getSerializedSize() {
        int size = memoizedSerializedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) == 0x00000001)) {
          size += com.google.protobuf.CodedOutputStream
            .computeBytesSize(1, getNameBytes());
        }
        if (((bitField0_ & 0x00000002) == 0x00000002)) {
          size += com.google.protobuf.CodedOutputStream
            .computeUInt64Size(2, length_);
        }
        if (((bitField0_ & 0x00000004) == 0x00000004)) {
          size += com.google.protobuf.CodedOutputStream
            .computeUInt64Size(3, offset_);
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSerializedSize = size;
        return size;
      }

      private static final long serialVersionUID = 0L;
      @java.lang.Override
      protected java.lang.Object writeReplace()
          throws java.io.ObjectStreamException {
        return super.writeReplace();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom(
          com.google.protobuf.ByteString data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom(
          com.google.protobuf.ByteString data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom(byte[] data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom(
          byte[] data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return PARSER.parseFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseFrom(input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return PARSER.parseDelimitedFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseDelimitedFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseDelimitedFrom(input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom(
          com.google.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return PARSER.parseFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseFrom(input, extensionRegistry);
      }

      public static Builder newBuilder() { return Builder.create(); }
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section prototype) {
        return newBuilder().mergeFrom(prototype);
      }
      public Builder toBuilder() { return newBuilder(this); }

      @java.lang.Override
      protected Builder newBuilderForType(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.FileSummary.Section}
       *
       * <pre>
       * index for each section
       * </pre>
       */
      public static final class Builder extends
          com.google.protobuf.GeneratedMessage.Builder<Builder>
         implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder {
        public static final com.google.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_Section_descriptor;
        }

        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_Section_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.newBuilder()
        private Builder() {
          maybeForceBuilderInitialization();
        }

        private Builder(
            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
          super(parent);
          maybeForceBuilderInitialization();
        }
        private void maybeForceBuilderInitialization() {
          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          }
        }
        private static Builder create() {
          return new Builder();
        }

        public Builder clear() {
          super.clear();
          name_ = "";
          bitField0_ = (bitField0_ & ~0x00000001);
          length_ = 0L;
          bitField0_ = (bitField0_ & ~0x00000002);
          offset_ = 0L;
          bitField0_ = (bitField0_ & ~0x00000004);
          return this;
        }

        public Builder clone() {
          return create().mergeFrom(buildPartial());
        }

        public com.google.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_Section_descriptor;
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.getDefaultInstance();
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section(this);
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
            to_bitField0_ |= 0x00000001;
          }
          result.name_ = name_;
          if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
            to_bitField0_ |= 0x00000002;
          }
          result.length_ = length_;
          if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
            to_bitField0_ |= 0x00000004;
          }
          result.offset_ = offset_;
          result.bitField0_ = to_bitField0_;
          onBuilt();
          return result;
        }

        public Builder mergeFrom(com.google.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.getDefaultInstance()) return this;
          if (other.hasName()) {
            bitField0_ |= 0x00000001;
            name_ = other.name_;
            onChanged();
          }
          if (other.hasLength()) {
            setLength(other.getLength());
          }
          if (other.hasOffset()) {
            setOffset(other.getOffset());
          }
          this.mergeUnknownFields(other.getUnknownFields());
          return this;
        }

        public final boolean isInitialized() {
          return true;
        }

        public Builder mergeFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parsedMessage = null;
          try {
            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section) e.getUnfinishedMessage();
            throw e;
          } finally {
            if (parsedMessage != null) {
              mergeFrom(parsedMessage);
            }
          }
          return this;
        }
        private int bitField0_;

        // optional string name = 1;
        private java.lang.Object name_ = "";
        /**
         * <code>optional string name = 1;</code>
         */
        public boolean hasName() {
          return ((bitField0_ & 0x00000001) == 0x00000001);
        }
        /**
         * <code>optional string name = 1;</code>
         */
        public java.lang.String getName() {
          java.lang.Object ref = name_;
          if (!(ref instanceof java.lang.String)) {
            java.lang.String s = ((com.google.protobuf.ByteString) ref)
                .toStringUtf8();
            name_ = s;
            return s;
          } else {
            return (java.lang.String) ref;
          }
        }
        /**
         * <code>optional string name = 1;</code>
         */
        public com.google.protobuf.ByteString
            getNameBytes() {
          java.lang.Object ref = name_;
          if (ref instanceof String) {
            com.google.protobuf.ByteString b =
                com.google.protobuf.ByteString.copyFromUtf8(
                    (java.lang.String) ref);
            name_ = b;
            return b;
          } else {
            return (com.google.protobuf.ByteString) ref;
          }
        }
        /**
         * <code>optional string name = 1;</code>
         */
        public Builder setName(
            java.lang.String value) {
          if (value == null) {
            throw new NullPointerException();
          }
          bitField0_ |= 0x00000001;
          name_ = value;
          onChanged();
          return this;
        }
        /**
         * <code>optional string name = 1;</code>
         */
        public Builder clearName() {
          bitField0_ = (bitField0_ & ~0x00000001);
          name_ = getDefaultInstance().getName();
          onChanged();
          return this;
        }
        /**
         * <code>optional string name = 1;</code>
         */
        public Builder setNameBytes(
            com.google.protobuf.ByteString value) {
          if (value == null) {
            throw new NullPointerException();
          }
          bitField0_ |= 0x00000001;
          name_ = value;
          onChanged();
          return this;
        }

        // optional uint64 length = 2;
        private long length_;
        /**
         * <code>optional uint64 length = 2;</code>
         */
        public boolean hasLength() {
          return ((bitField0_ & 0x00000002) == 0x00000002);
        }
        /**
         * <code>optional uint64 length = 2;</code>
         */
        public long getLength() {
          return length_;
        }
        /**
         * <code>optional uint64 length = 2;</code>
         */
        public Builder setLength(long value) {
          bitField0_ |= 0x00000002;
          length_ = value;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 length = 2;</code>
         */
        public Builder clearLength() {
          bitField0_ = (bitField0_ & ~0x00000002);
          length_ = 0L;
          onChanged();
          return this;
        }

        // optional uint64 offset = 3;
        private long offset_;
        /**
         * <code>optional uint64 offset = 3;</code>
         */
        public boolean hasOffset() {
          return ((bitField0_ & 0x00000004) == 0x00000004);
        }
        /**
         * <code>optional uint64 offset = 3;</code>
         */
        public long getOffset() {
          return offset_;
        }
        /**
         * <code>optional uint64 offset = 3;</code>
         */
        public Builder setOffset(long value) {
          bitField0_ |= 0x00000004;
          offset_ = value;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 offset = 3;</code>
         */
        public Builder clearOffset() {
          bitField0_ = (bitField0_ & ~0x00000004);
          offset_ = 0L;
          onChanged();
          return this;
        }

        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.FileSummary.Section)
      }

      static {
        defaultInstance = new Section(true);
        defaultInstance.initFields();
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.FileSummary.Section)
    }
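
    /*
     * Illustrative sketch (hand-added; not part of the protoc output): building
     * a Section and round-tripping it through the wire format. The section name
     * and numeric values below are hypothetical placeholders.
     *
     *   FsImageProto.FileSummary.Section section =
     *       FsImageProto.FileSummary.Section.newBuilder()
     *           .setName("INODE")   // hypothetical section name
     *           .setOffset(1024L)   // byte offset of the section in the image file
     *           .setLength(4096L)   // byte length of the section
     *           .build();
     *   byte[] bytes = section.toByteArray();              // serialize
     *   FsImageProto.FileSummary.Section parsed =
     *       FsImageProto.FileSummary.Section.parseFrom(bytes);  // deserialize
     */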

    private int bitField0_;
    // required uint32 ondiskVersion = 1;
    public static final int ONDISKVERSION_FIELD_NUMBER = 1;
    private int ondiskVersion_;
    /**
     * <code>required uint32 ondiskVersion = 1;</code>
     *
     * <pre>
     * The version of the above EBNF grammars.
     * </pre>
     */
    public boolean hasOndiskVersion() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required uint32 ondiskVersion = 1;</code>
     *
     * <pre>
     * The version of the above EBNF grammars.
     * </pre>
     */
    public int getOndiskVersion() {
      return ondiskVersion_;
    }

    // required uint32 layoutVersion = 2;
    public static final int LAYOUTVERSION_FIELD_NUMBER = 2;
    private int layoutVersion_;
    /**
     * <code>required uint32 layoutVersion = 2;</code>
     *
     * <pre>
     * layoutVersion describes which features are available in the
     * FSImage.
     * </pre>
     */
    public boolean hasLayoutVersion() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required uint32 layoutVersion = 2;</code>
     *
     * <pre>
     * layoutVersion describes which features are available in the
     * FSImage.
     * </pre>
     */
    public int getLayoutVersion() {
      return layoutVersion_;
    }

    // optional string codec = 3;
    public static final int CODEC_FIELD_NUMBER = 3;
    private java.lang.Object codec_;
    /**
     * <code>optional string codec = 3;</code>
     */
    public boolean hasCodec() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>optional string codec = 3;</code>
     */
    public java.lang.String getCodec() {
      java.lang.Object ref = codec_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        com.google.protobuf.ByteString bs =
            (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          codec_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string codec = 3;</code>
     */
    public com.google.protobuf.ByteString
        getCodecBytes() {
      java.lang.Object ref = codec_;
      if (ref instanceof java.lang.String) {
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        codec_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }

    // repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;
    public static final int SECTIONS_FIELD_NUMBER = 4;
    private java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section> sections_;
    /**
     * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
     */
    public java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section> getSectionsList() {
      return sections_;
    }
    /**
     * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
     */
    public java.util.List<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder>
        getSectionsOrBuilderList() {
      return sections_;
    }
    /**
     * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
     */
    public int getSectionsCount() {
      return sections_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
     */
    public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section getSections(int index) {
      return sections_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
     */
    public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder getSectionsOrBuilder(
        int index) {
      return sections_.get(index);
    }

    private void initFields() {
      ondiskVersion_ = 0;
      layoutVersion_ = 0;
      codec_ = "";
      sections_ = java.util.Collections.emptyList();
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasOndiskVersion()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasLayoutVersion()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeUInt32(1, ondiskVersion_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt32(2, layoutVersion_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeBytes(3, getCodecBytes());
      }
      for (int i = 0; i < sections_.size(); i++) {
        output.writeMessage(4, sections_.get(i));
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt32Size(1, ondiskVersion_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt32Size(2, layoutVersion_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(3, getCodecBytes());
      }
      for (int i = 0; i < sections_.size(); i++) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(4, sections_.get(i));
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

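    /*
     * Illustrative sketch (hand-added; not part of the protoc output): building
     * a summary and walking its section index. The field values and the
     * "STRING_TABLE" name are hypothetical placeholders.
     *
     *   FsImageProto.FileSummary summary = FsImageProto.FileSummary.newBuilder()
     *       .setOndiskVersion(1)   // required field
     *       .setLayoutVersion(1)   // required field
     *       .addSections(FsImageProto.FileSummary.Section.newBuilder()
     *           .setName("STRING_TABLE").setOffset(0L).setLength(128L)
     *           .build())
     *       .build();              // build() throws if a required field is unset
     *   for (FsImageProto.FileSummary.Section s : summary.getSectionsList()) {
     *     System.out.println(s.getName() + " @ " + s.getOffset() + " + " + s.getLength());
     *   }
     */
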
1107    public static Builder newBuilder() { return Builder.create(); }
1108    public Builder newBuilderForType() { return newBuilder(); }
1109    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary prototype) {
1110      return newBuilder().mergeFrom(prototype);
1111    }
1112    public Builder toBuilder() { return newBuilder(this); }
1113
1114    @java.lang.Override
1115    protected Builder newBuilderForType(
1116        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
1117      Builder builder = new Builder(parent);
1118      return builder;
1119    }
1120    /**
1121     * Protobuf type {@code hadoop.hdfs.fsimage.FileSummary}
1122     */
1123    public static final class Builder extends
1124        com.google.protobuf.GeneratedMessage.Builder<Builder>
1125       implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummaryOrBuilder {
1126      public static final com.google.protobuf.Descriptors.Descriptor
1127          getDescriptor() {
1128        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_descriptor;
1129      }
1130
1131      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
1132          internalGetFieldAccessorTable() {
1133        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_fieldAccessorTable
1134            .ensureFieldAccessorsInitialized(
1135                org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Builder.class);
1136      }
1137
1138      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.newBuilder()
1139      private Builder() {
1140        maybeForceBuilderInitialization();
1141      }
1142
1143      private Builder(
1144          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
1145        super(parent);
1146        maybeForceBuilderInitialization();
1147      }
1148      private void maybeForceBuilderInitialization() {
1149        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
1150          getSectionsFieldBuilder();
1151        }
1152      }
1153      private static Builder create() {
1154        return new Builder();
1155      }
1156
1157      public Builder clear() {
1158        super.clear();
1159        ondiskVersion_ = 0;
1160        bitField0_ = (bitField0_ & ~0x00000001);
1161        layoutVersion_ = 0;
1162        bitField0_ = (bitField0_ & ~0x00000002);
1163        codec_ = "";
1164        bitField0_ = (bitField0_ & ~0x00000004);
1165        if (sectionsBuilder_ == null) {
1166          sections_ = java.util.Collections.emptyList();
1167          bitField0_ = (bitField0_ & ~0x00000008);
1168        } else {
1169          sectionsBuilder_.clear();
1170        }
1171        return this;
1172      }
1173
1174      public Builder clone() {
1175        return create().mergeFrom(buildPartial());
1176      }
1177
1178      public com.google.protobuf.Descriptors.Descriptor
1179          getDescriptorForType() {
1180        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_descriptor;
1181      }
1182
1183      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary getDefaultInstanceForType() {
1184        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.getDefaultInstance();
1185      }
1186
1187      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary build() {
1188        org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary result = buildPartial();
1189        if (!result.isInitialized()) {
1190          throw newUninitializedMessageException(result);
1191        }
1192        return result;
1193      }
1194
1195      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary buildPartial() {
1196        org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary(this);
1197        int from_bitField0_ = bitField0_;
1198        int to_bitField0_ = 0;
1199        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
1200          to_bitField0_ |= 0x00000001;
1201        }
1202        result.ondiskVersion_ = ondiskVersion_;
1203        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
1204          to_bitField0_ |= 0x00000002;
1205        }
1206        result.layoutVersion_ = layoutVersion_;
1207        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
1208          to_bitField0_ |= 0x00000004;
1209        }
1210        result.codec_ = codec_;
1211        if (sectionsBuilder_ == null) {
1212          if (((bitField0_ & 0x00000008) == 0x00000008)) {
1213            sections_ = java.util.Collections.unmodifiableList(sections_);
1214            bitField0_ = (bitField0_ & ~0x00000008);
1215          }
1216          result.sections_ = sections_;
1217        } else {
1218          result.sections_ = sectionsBuilder_.build();
1219        }
1220        result.bitField0_ = to_bitField0_;
1221        onBuilt();
1222        return result;
1223      }
1224
1225      public Builder mergeFrom(com.google.protobuf.Message other) {
1226        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary) {
1227          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary)other);
1228        } else {
1229          super.mergeFrom(other);
1230          return this;
1231        }
1232      }
1233
1234      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary other) {
1235        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.getDefaultInstance()) return this;
1236        if (other.hasOndiskVersion()) {
1237          setOndiskVersion(other.getOndiskVersion());
1238        }
1239        if (other.hasLayoutVersion()) {
1240          setLayoutVersion(other.getLayoutVersion());
1241        }
1242        if (other.hasCodec()) {
1243          bitField0_ |= 0x00000004;
1244          codec_ = other.codec_;
1245          onChanged();
1246        }
1247        if (sectionsBuilder_ == null) {
1248          if (!other.sections_.isEmpty()) {
1249            if (sections_.isEmpty()) {
1250              sections_ = other.sections_;
1251              bitField0_ = (bitField0_ & ~0x00000008);
1252            } else {
1253              ensureSectionsIsMutable();
1254              sections_.addAll(other.sections_);
1255            }
1256            onChanged();
1257          }
1258        } else {
1259          if (!other.sections_.isEmpty()) {
1260            if (sectionsBuilder_.isEmpty()) {
1261              sectionsBuilder_.dispose();
1262              sectionsBuilder_ = null;
1263              sections_ = other.sections_;
1264              bitField0_ = (bitField0_ & ~0x00000008);
1265              sectionsBuilder_ = 
1266                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
1267                   getSectionsFieldBuilder() : null;
1268            } else {
1269              sectionsBuilder_.addAllMessages(other.sections_);
1270            }
1271          }
1272        }
1273        this.mergeUnknownFields(other.getUnknownFields());
1274        return this;
1275      }
1276
1277      public final boolean isInitialized() {
1278        if (!hasOndiskVersion()) {
1279          
1280          return false;
1281        }
1282        if (!hasLayoutVersion()) {
1283          
1284          return false;
1285        }
1286        return true;
1287      }
1288
1289      public Builder mergeFrom(
1290          com.google.protobuf.CodedInputStream input,
1291          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1292          throws java.io.IOException {
1293        org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parsedMessage = null;
1294        try {
1295          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
1296        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
1297          parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary) e.getUnfinishedMessage();
1298          throw e;
1299        } finally {
1300          if (parsedMessage != null) {
1301            mergeFrom(parsedMessage);
1302          }
1303        }
1304        return this;
1305      }
1306      private int bitField0_;
1307
1308      // required uint32 ondiskVersion = 1;
1309      private int ondiskVersion_ ;
1310      /**
1311       * <code>required uint32 ondiskVersion = 1;</code>
1312       *
1313       * <pre>
1314       * The version of the above EBNF grammars.
1315       * </pre>
1316       */
1317      public boolean hasOndiskVersion() {
1318        return ((bitField0_ & 0x00000001) == 0x00000001);
1319      }
1320      /**
1321       * <code>required uint32 ondiskVersion = 1;</code>
1322       *
1323       * <pre>
1324       * The version of the above EBNF grammars.
1325       * </pre>
1326       */
1327      public int getOndiskVersion() {
1328        return ondiskVersion_;
1329      }
1330      /**
1331       * <code>required uint32 ondiskVersion = 1;</code>
1332       *
1333       * <pre>
1334       * The version of the above EBNF grammars.
1335       * </pre>
1336       */
1337      public Builder setOndiskVersion(int value) {
1338        bitField0_ |= 0x00000001;
1339        ondiskVersion_ = value;
1340        onChanged();
1341        return this;
1342      }
1343      /**
1344       * <code>required uint32 ondiskVersion = 1;</code>
1345       *
1346       * <pre>
1347       * The version of the above EBNF grammars.
1348       * </pre>
1349       */
1350      public Builder clearOndiskVersion() {
1351        bitField0_ = (bitField0_ & ~0x00000001);
1352        ondiskVersion_ = 0;
1353        onChanged();
1354        return this;
1355      }
1356
1357      // required uint32 layoutVersion = 2;
1358      private int layoutVersion_ ;
1359      /**
1360       * <code>required uint32 layoutVersion = 2;</code>
1361       *
1362       * <pre>
1363       * layoutVersion describes which features are available in the
1364       * FSImage.
1365       * </pre>
1366       */
1367      public boolean hasLayoutVersion() {
1368        return ((bitField0_ & 0x00000002) == 0x00000002);
1369      }
1370      /**
1371       * <code>required uint32 layoutVersion = 2;</code>
1372       *
1373       * <pre>
1374       * layoutVersion describes which features are available in the
1375       * FSImage.
1376       * </pre>
1377       */
1378      public int getLayoutVersion() {
1379        return layoutVersion_;
1380      }
1381      /**
1382       * <code>required uint32 layoutVersion = 2;</code>
1383       *
1384       * <pre>
1385       * layoutVersion describes which features are available in the
1386       * FSImage.
1387       * </pre>
1388       */
1389      public Builder setLayoutVersion(int value) {
1390        bitField0_ |= 0x00000002;
1391        layoutVersion_ = value;
1392        onChanged();
1393        return this;
1394      }
1395      /**
1396       * <code>required uint32 layoutVersion = 2;</code>
1397       *
1398       * <pre>
1399       * layoutVersion describes which features are available in the
1400       * FSImage.
1401       * </pre>
1402       */
1403      public Builder clearLayoutVersion() {
1404        bitField0_ = (bitField0_ & ~0x00000002);
1405        layoutVersion_ = 0;
1406        onChanged();
1407        return this;
1408      }
1409
1410      // optional string codec = 3;
1411      private java.lang.Object codec_ = "";
1412      /**
1413       * <code>optional string codec = 3;</code>
1414       */
1415      public boolean hasCodec() {
1416        return ((bitField0_ & 0x00000004) == 0x00000004);
1417      }
1418      /**
1419       * <code>optional string codec = 3;</code>
1420       */
1421      public java.lang.String getCodec() {
1422        java.lang.Object ref = codec_;
1423        if (!(ref instanceof java.lang.String)) {
1424          java.lang.String s = ((com.google.protobuf.ByteString) ref)
1425              .toStringUtf8();
1426          codec_ = s;
1427          return s;
1428        } else {
1429          return (java.lang.String) ref;
1430        }
1431      }
1432      /**
1433       * <code>optional string codec = 3;</code>
1434       */
1435      public com.google.protobuf.ByteString
1436          getCodecBytes() {
1437        java.lang.Object ref = codec_;
1438        if (ref instanceof String) {
1439          com.google.protobuf.ByteString b = 
1440              com.google.protobuf.ByteString.copyFromUtf8(
1441                  (java.lang.String) ref);
1442          codec_ = b;
1443          return b;
1444        } else {
1445          return (com.google.protobuf.ByteString) ref;
1446        }
1447      }
1448      /**
1449       * <code>optional string codec = 3;</code>
1450       */
1451      public Builder setCodec(
1452          java.lang.String value) {
1453        if (value == null) {
1454    throw new NullPointerException();
1455  }
1456  bitField0_ |= 0x00000004;
1457        codec_ = value;
1458        onChanged();
1459        return this;
1460      }
1461      /**
1462       * <code>optional string codec = 3;</code>
1463       */
1464      public Builder clearCodec() {
1465        bitField0_ = (bitField0_ & ~0x00000004);
1466        codec_ = getDefaultInstance().getCodec();
1467        onChanged();
1468        return this;
1469      }
1470      /**
1471       * <code>optional string codec = 3;</code>
1472       */
1473      public Builder setCodecBytes(
1474          com.google.protobuf.ByteString value) {
1475        if (value == null) {
1476    throw new NullPointerException();
1477  }
1478  bitField0_ |= 0x00000004;
1479        codec_ = value;
1480        onChanged();
1481        return this;
1482      }
1483
1484      // repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;
1485      private java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section> sections_ =
1486        java.util.Collections.emptyList();
1487      private void ensureSectionsIsMutable() {
1488        if (!((bitField0_ & 0x00000008) == 0x00000008)) {
1489          sections_ = new java.util.ArrayList<org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section>(sections_);
1490          bitField0_ |= 0x00000008;
1491         }
1492      }
1493
1494      private com.google.protobuf.RepeatedFieldBuilder<
1495          org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder> sectionsBuilder_;
1496
1497      /**
1498       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
1499       */
1500      public java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section> getSectionsList() {
1501        if (sectionsBuilder_ == null) {
1502          return java.util.Collections.unmodifiableList(sections_);
1503        } else {
1504          return sectionsBuilder_.getMessageList();
1505        }
1506      }
1507      /**
1508       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
1509       */
1510      public int getSectionsCount() {
1511        if (sectionsBuilder_ == null) {
1512          return sections_.size();
1513        } else {
1514          return sectionsBuilder_.getCount();
1515        }
1516      }
1517      /**
1518       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
1519       */
1520      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section getSections(int index) {
1521        if (sectionsBuilder_ == null) {
1522          return sections_.get(index);
1523        } else {
1524          return sectionsBuilder_.getMessage(index);
1525        }
1526      }
1527      /**
1528       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
1529       */
1530      public Builder setSections(
1531          int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section value) {
1532        if (sectionsBuilder_ == null) {
1533          if (value == null) {
1534            throw new NullPointerException();
1535          }
1536          ensureSectionsIsMutable();
1537          sections_.set(index, value);
1538          onChanged();
1539        } else {
1540          sectionsBuilder_.setMessage(index, value);
1541        }
1542        return this;
1543      }
1544      /**
1545       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
1546       */
1547      public Builder setSections(
1548          int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder builderForValue) {
1549        if (sectionsBuilder_ == null) {
1550          ensureSectionsIsMutable();
1551          sections_.set(index, builderForValue.build());
1552          onChanged();
1553        } else {
1554          sectionsBuilder_.setMessage(index, builderForValue.build());
1555        }
1556        return this;
1557      }
1558      /**
1559       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
1560       */
1561      public Builder addSections(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section value) {
1562        if (sectionsBuilder_ == null) {
1563          if (value == null) {
1564            throw new NullPointerException();
1565          }
1566          ensureSectionsIsMutable();
1567          sections_.add(value);
1568          onChanged();
1569        } else {
1570          sectionsBuilder_.addMessage(value);
1571        }
1572        return this;
1573      }
1574      /**
1575       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
1576       */
1577      public Builder addSections(
1578          int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section value) {
1579        if (sectionsBuilder_ == null) {
1580          if (value == null) {
1581            throw new NullPointerException();
1582          }
1583          ensureSectionsIsMutable();
1584          sections_.add(index, value);
1585          onChanged();
1586        } else {
1587          sectionsBuilder_.addMessage(index, value);
1588        }
1589        return this;
1590      }
1591      /**
1592       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
1593       */
1594      public Builder addSections(
1595          org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder builderForValue) {
1596        if (sectionsBuilder_ == null) {
1597          ensureSectionsIsMutable();
1598          sections_.add(builderForValue.build());
1599          onChanged();
1600        } else {
1601          sectionsBuilder_.addMessage(builderForValue.build());
1602        }
1603        return this;
1604      }
1605      /**
1606       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
1607       */
1608      public Builder addSections(
1609          int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder builderForValue) {
1610        if (sectionsBuilder_ == null) {
1611          ensureSectionsIsMutable();
1612          sections_.add(index, builderForValue.build());
1613          onChanged();
1614        } else {
1615          sectionsBuilder_.addMessage(index, builderForValue.build());
1616        }
1617        return this;
1618      }
1619      /**
1620       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
1621       */
1622      public Builder addAllSections(
1623          java.lang.Iterable<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section> values) {
1624        if (sectionsBuilder_ == null) {
1625          ensureSectionsIsMutable();
1626          super.addAll(values, sections_);
1627          onChanged();
1628        } else {
1629          sectionsBuilder_.addAllMessages(values);
1630        }
1631        return this;
1632      }
1633      /**
1634       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
1635       */
1636      public Builder clearSections() {
1637        if (sectionsBuilder_ == null) {
1638          sections_ = java.util.Collections.emptyList();
1639          bitField0_ = (bitField0_ & ~0x00000008);
1640          onChanged();
1641        } else {
1642          sectionsBuilder_.clear();
1643        }
1644        return this;
1645      }
1646      /**
1647       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
1648       */
1649      public Builder removeSections(int index) {
1650        if (sectionsBuilder_ == null) {
1651          ensureSectionsIsMutable();
1652          sections_.remove(index);
1653          onChanged();
1654        } else {
1655          sectionsBuilder_.remove(index);
1656        }
1657        return this;
1658      }
1659      /**
1660       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
1661       */
1662      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder getSectionsBuilder(
1663          int index) {
1664        return getSectionsFieldBuilder().getBuilder(index);
1665      }
1666      /**
1667       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
1668       */
1669      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder getSectionsOrBuilder(
1670          int index) {
1671        if (sectionsBuilder_ == null) {
          return sections_.get(index);
        } else {
1673          return sectionsBuilder_.getMessageOrBuilder(index);
1674        }
1675      }
1676      /**
1677       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
1678       */
1679      public java.util.List<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder> 
1680           getSectionsOrBuilderList() {
1681        if (sectionsBuilder_ != null) {
1682          return sectionsBuilder_.getMessageOrBuilderList();
1683        } else {
1684          return java.util.Collections.unmodifiableList(sections_);
1685        }
1686      }
1687      /**
1688       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
1689       */
1690      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder addSectionsBuilder() {
1691        return getSectionsFieldBuilder().addBuilder(
1692            org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.getDefaultInstance());
1693      }
1694      /**
1695       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
1696       */
1697      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder addSectionsBuilder(
1698          int index) {
1699        return getSectionsFieldBuilder().addBuilder(
1700            index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.getDefaultInstance());
1701      }
1702      /**
1703       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
1704       */
1705      public java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder> 
1706           getSectionsBuilderList() {
1707        return getSectionsFieldBuilder().getBuilderList();
1708      }
1709      private com.google.protobuf.RepeatedFieldBuilder<
1710          org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder> 
1711          getSectionsFieldBuilder() {
1712        if (sectionsBuilder_ == null) {
1713          sectionsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
1714              org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder>(
1715                  sections_,
1716                  ((bitField0_ & 0x00000008) == 0x00000008),
1717                  getParentForChildren(),
1718                  isClean());
1719          sections_ = null;
1720        }
1721        return sectionsBuilder_;
1722      }
1723
1724      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.FileSummary)
1725    }
1726
1727    static {
1728      defaultInstance = new FileSummary(true);
1729      defaultInstance.initFields();
1730    }
1731
1732    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.FileSummary)
1733  }
1734
1735  public interface NameSystemSectionOrBuilder
1736      extends com.google.protobuf.MessageOrBuilder {
1737
1738    // optional uint32 namespaceId = 1;
1739    /**
1740     * <code>optional uint32 namespaceId = 1;</code>
1741     */
1742    boolean hasNamespaceId();
1743    /**
1744     * <code>optional uint32 namespaceId = 1;</code>
1745     */
1746    int getNamespaceId();
1747
1748    // optional uint64 genstampV1 = 2;
1749    /**
1750     * <code>optional uint64 genstampV1 = 2;</code>
1751     */
1752    boolean hasGenstampV1();
1753    /**
1754     * <code>optional uint64 genstampV1 = 2;</code>
1755     */
1756    long getGenstampV1();
1757
1758    // optional uint64 genstampV2 = 3;
1759    /**
1760     * <code>optional uint64 genstampV2 = 3;</code>
1761     */
1762    boolean hasGenstampV2();
1763    /**
1764     * <code>optional uint64 genstampV2 = 3;</code>
1765     */
1766    long getGenstampV2();
1767
1768    // optional uint64 genstampV1Limit = 4;
1769    /**
1770     * <code>optional uint64 genstampV1Limit = 4;</code>
1771     */
1772    boolean hasGenstampV1Limit();
1773    /**
1774     * <code>optional uint64 genstampV1Limit = 4;</code>
1775     */
1776    long getGenstampV1Limit();
1777
1778    // optional uint64 lastAllocatedBlockId = 5;
1779    /**
1780     * <code>optional uint64 lastAllocatedBlockId = 5;</code>
1781     */
1782    boolean hasLastAllocatedBlockId();
1783    /**
1784     * <code>optional uint64 lastAllocatedBlockId = 5;</code>
1785     */
1786    long getLastAllocatedBlockId();
1787
1788    // optional uint64 transactionId = 6;
1789    /**
1790     * <code>optional uint64 transactionId = 6;</code>
1791     */
1792    boolean hasTransactionId();
1793    /**
1794     * <code>optional uint64 transactionId = 6;</code>
1795     */
1796    long getTransactionId();
1797
1798    // optional uint64 rollingUpgradeStartTime = 7;
1799    /**
1800     * <code>optional uint64 rollingUpgradeStartTime = 7;</code>
1801     */
1802    boolean hasRollingUpgradeStartTime();
1803    /**
1804     * <code>optional uint64 rollingUpgradeStartTime = 7;</code>
1805     */
1806    long getRollingUpgradeStartTime();
1807  }
1808  /**
1809   * Protobuf type {@code hadoop.hdfs.fsimage.NameSystemSection}
1810   *
1811   * <pre>
1812   **
1813   * Name: NS_INFO
1814   * </pre>
1815   */
1816  public static final class NameSystemSection extends
1817      com.google.protobuf.GeneratedMessage
1818      implements NameSystemSectionOrBuilder {
1819    // Use NameSystemSection.newBuilder() to construct.
1820    private NameSystemSection(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
1821      super(builder);
1822      this.unknownFields = builder.getUnknownFields();
1823    }
1824    private NameSystemSection(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
1825
1826    private static final NameSystemSection defaultInstance;
1827    public static NameSystemSection getDefaultInstance() {
1828      return defaultInstance;
1829    }
1830
1831    public NameSystemSection getDefaultInstanceForType() {
1832      return defaultInstance;
1833    }
1834
1835    private final com.google.protobuf.UnknownFieldSet unknownFields;
1836    @java.lang.Override
1837    public final com.google.protobuf.UnknownFieldSet
1838        getUnknownFields() {
1839      return this.unknownFields;
1840    }
1841    private NameSystemSection(
1842        com.google.protobuf.CodedInputStream input,
1843        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1844        throws com.google.protobuf.InvalidProtocolBufferException {
1845      initFields();
1846      int mutable_bitField0_ = 0;
1847      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
1848          com.google.protobuf.UnknownFieldSet.newBuilder();
1849      try {
1850        boolean done = false;
1851        while (!done) {
1852          int tag = input.readTag();
1853          switch (tag) {
1854            case 0:
1855              done = true;
1856              break;
            case 8: {
              bitField0_ |= 0x00000001;
              namespaceId_ = input.readUInt32();
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              genstampV1_ = input.readUInt64();
              break;
            }
            case 24: {
              bitField0_ |= 0x00000004;
              genstampV2_ = input.readUInt64();
              break;
            }
            case 32: {
              bitField0_ |= 0x00000008;
              genstampV1Limit_ = input.readUInt64();
              break;
            }
            case 40: {
              bitField0_ |= 0x00000010;
              lastAllocatedBlockId_ = input.readUInt64();
              break;
            }
            case 48: {
              bitField0_ |= 0x00000020;
              transactionId_ = input.readUInt64();
              break;
            }
            case 56: {
              bitField0_ |= 0x00000040;
              rollingUpgradeStartTime_ = input.readUInt64();
              break;
            }
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
1899          }
1900        }
1901      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
1902        throw e.setUnfinishedMessage(this);
1903      } catch (java.io.IOException e) {
1904        throw new com.google.protobuf.InvalidProtocolBufferException(
1905            e.getMessage()).setUnfinishedMessage(this);
1906      } finally {
1907        this.unknownFields = unknownFields.build();
1908        makeExtensionsImmutable();
1909      }
1910    }
1911    public static final com.google.protobuf.Descriptors.Descriptor
1912        getDescriptor() {
1913      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_NameSystemSection_descriptor;
1914    }
1915
1916    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
1917        internalGetFieldAccessorTable() {
1918      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_NameSystemSection_fieldAccessorTable
1919          .ensureFieldAccessorsInitialized(
1920              org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection.Builder.class);
1921    }
1922
1923    public static com.google.protobuf.Parser<NameSystemSection> PARSER =
1924        new com.google.protobuf.AbstractParser<NameSystemSection>() {
1925      public NameSystemSection parsePartialFrom(
1926          com.google.protobuf.CodedInputStream input,
1927          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1928          throws com.google.protobuf.InvalidProtocolBufferException {
1929        return new NameSystemSection(input, extensionRegistry);
1930      }
1931    };
1932
1933    @java.lang.Override
1934    public com.google.protobuf.Parser<NameSystemSection> getParserForType() {
1935      return PARSER;
1936    }
1937
1938    private int bitField0_;
1939    // optional uint32 namespaceId = 1;
1940    public static final int NAMESPACEID_FIELD_NUMBER = 1;
1941    private int namespaceId_;
1942    /**
1943     * <code>optional uint32 namespaceId = 1;</code>
1944     */
1945    public boolean hasNamespaceId() {
1946      return ((bitField0_ & 0x00000001) == 0x00000001);
1947    }
1948    /**
1949     * <code>optional uint32 namespaceId = 1;</code>
1950     */
1951    public int getNamespaceId() {
1952      return namespaceId_;
1953    }
1954
1955    // optional uint64 genstampV1 = 2;
1956    public static final int GENSTAMPV1_FIELD_NUMBER = 2;
1957    private long genstampV1_;
1958    /**
1959     * <code>optional uint64 genstampV1 = 2;</code>
1960     */
1961    public boolean hasGenstampV1() {
1962      return ((bitField0_ & 0x00000002) == 0x00000002);
1963    }
1964    /**
1965     * <code>optional uint64 genstampV1 = 2;</code>
1966     */
1967    public long getGenstampV1() {
1968      return genstampV1_;
1969    }
1970
1971    // optional uint64 genstampV2 = 3;
1972    public static final int GENSTAMPV2_FIELD_NUMBER = 3;
1973    private long genstampV2_;
1974    /**
1975     * <code>optional uint64 genstampV2 = 3;</code>
1976     */
1977    public boolean hasGenstampV2() {
1978      return ((bitField0_ & 0x00000004) == 0x00000004);
1979    }
1980    /**
1981     * <code>optional uint64 genstampV2 = 3;</code>
1982     */
1983    public long getGenstampV2() {
1984      return genstampV2_;
1985    }
1986
1987    // optional uint64 genstampV1Limit = 4;
1988    public static final int GENSTAMPV1LIMIT_FIELD_NUMBER = 4;
1989    private long genstampV1Limit_;
1990    /**
1991     * <code>optional uint64 genstampV1Limit = 4;</code>
1992     */
1993    public boolean hasGenstampV1Limit() {
1994      return ((bitField0_ & 0x00000008) == 0x00000008);
1995    }
1996    /**
1997     * <code>optional uint64 genstampV1Limit = 4;</code>
1998     */
1999    public long getGenstampV1Limit() {
2000      return genstampV1Limit_;
2001    }
2002
2003    // optional uint64 lastAllocatedBlockId = 5;
2004    public static final int LASTALLOCATEDBLOCKID_FIELD_NUMBER = 5;
2005    private long lastAllocatedBlockId_;
2006    /**
2007     * <code>optional uint64 lastAllocatedBlockId = 5;</code>
2008     */
2009    public boolean hasLastAllocatedBlockId() {
2010      return ((bitField0_ & 0x00000010) == 0x00000010);
2011    }
2012    /**
2013     * <code>optional uint64 lastAllocatedBlockId = 5;</code>
2014     */
2015    public long getLastAllocatedBlockId() {
2016      return lastAllocatedBlockId_;
2017    }
2018
2019    // optional uint64 transactionId = 6;
2020    public static final int TRANSACTIONID_FIELD_NUMBER = 6;
2021    private long transactionId_;
2022    /**
2023     * <code>optional uint64 transactionId = 6;</code>
2024     */
2025    public boolean hasTransactionId() {
2026      return ((bitField0_ & 0x00000020) == 0x00000020);
2027    }
2028    /**
2029     * <code>optional uint64 transactionId = 6;</code>
2030     */
2031    public long getTransactionId() {
2032      return transactionId_;
2033    }
2034
2035    // optional uint64 rollingUpgradeStartTime = 7;
2036    public static final int ROLLINGUPGRADESTARTTIME_FIELD_NUMBER = 7;
2037    private long rollingUpgradeStartTime_;
2038    /**
2039     * <code>optional uint64 rollingUpgradeStartTime = 7;</code>
2040     */
2041    public boolean hasRollingUpgradeStartTime() {
2042      return ((bitField0_ & 0x00000040) == 0x00000040);
2043    }
2044    /**
2045     * <code>optional uint64 rollingUpgradeStartTime = 7;</code>
2046     */
2047    public long getRollingUpgradeStartTime() {
2048      return rollingUpgradeStartTime_;
2049    }
2050
2051    private void initFields() {
2052      namespaceId_ = 0;
2053      genstampV1_ = 0L;
2054      genstampV2_ = 0L;
2055      genstampV1Limit_ = 0L;
2056      lastAllocatedBlockId_ = 0L;
2057      transactionId_ = 0L;
2058      rollingUpgradeStartTime_ = 0L;
2059    }
2060    private byte memoizedIsInitialized = -1;
2061    public final boolean isInitialized() {
2062      byte isInitialized = memoizedIsInitialized;
2063      if (isInitialized != -1) return isInitialized == 1;
2064
2065      memoizedIsInitialized = 1;
2066      return true;
2067    }
2068
2069    public void writeTo(com.google.protobuf.CodedOutputStream output)
2070                        throws java.io.IOException {
2071      getSerializedSize();
2072      if (((bitField0_ & 0x00000001) == 0x00000001)) {
2073        output.writeUInt32(1, namespaceId_);
2074      }
2075      if (((bitField0_ & 0x00000002) == 0x00000002)) {
2076        output.writeUInt64(2, genstampV1_);
2077      }
2078      if (((bitField0_ & 0x00000004) == 0x00000004)) {
2079        output.writeUInt64(3, genstampV2_);
2080      }
2081      if (((bitField0_ & 0x00000008) == 0x00000008)) {
2082        output.writeUInt64(4, genstampV1Limit_);
2083      }
2084      if (((bitField0_ & 0x00000010) == 0x00000010)) {
2085        output.writeUInt64(5, lastAllocatedBlockId_);
2086      }
2087      if (((bitField0_ & 0x00000020) == 0x00000020)) {
2088        output.writeUInt64(6, transactionId_);
2089      }
2090      if (((bitField0_ & 0x00000040) == 0x00000040)) {
2091        output.writeUInt64(7, rollingUpgradeStartTime_);
2092      }
2093      getUnknownFields().writeTo(output);
2094    }
2095
2096    private int memoizedSerializedSize = -1;
2097    public int getSerializedSize() {
2098      int size = memoizedSerializedSize;
2099      if (size != -1) return size;
2100
2101      size = 0;
2102      if (((bitField0_ & 0x00000001) == 0x00000001)) {
2103        size += com.google.protobuf.CodedOutputStream
2104          .computeUInt32Size(1, namespaceId_);
2105      }
2106      if (((bitField0_ & 0x00000002) == 0x00000002)) {
2107        size += com.google.protobuf.CodedOutputStream
2108          .computeUInt64Size(2, genstampV1_);
2109      }
2110      if (((bitField0_ & 0x00000004) == 0x00000004)) {
2111        size += com.google.protobuf.CodedOutputStream
2112          .computeUInt64Size(3, genstampV2_);
2113      }
2114      if (((bitField0_ & 0x00000008) == 0x00000008)) {
2115        size += com.google.protobuf.CodedOutputStream
2116          .computeUInt64Size(4, genstampV1Limit_);
2117      }
2118      if (((bitField0_ & 0x00000010) == 0x00000010)) {
2119        size += com.google.protobuf.CodedOutputStream
2120          .computeUInt64Size(5, lastAllocatedBlockId_);
2121      }
2122      if (((bitField0_ & 0x00000020) == 0x00000020)) {
2123        size += com.google.protobuf.CodedOutputStream
2124          .computeUInt64Size(6, transactionId_);
2125      }
2126      if (((bitField0_ & 0x00000040) == 0x00000040)) {
2127        size += com.google.protobuf.CodedOutputStream
2128          .computeUInt64Size(7, rollingUpgradeStartTime_);
2129      }
2130      size += getUnknownFields().getSerializedSize();
2131      memoizedSerializedSize = size;
2132      return size;
2133    }
2134
2135    private static final long serialVersionUID = 0L;
2136    @java.lang.Override
2137    protected java.lang.Object writeReplace()
2138        throws java.io.ObjectStreamException {
2139      return super.writeReplace();
2140    }
2141
2142    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom(
2143        com.google.protobuf.ByteString data)
2144        throws com.google.protobuf.InvalidProtocolBufferException {
2145      return PARSER.parseFrom(data);
2146    }
2147    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom(
2148        com.google.protobuf.ByteString data,
2149        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2150        throws com.google.protobuf.InvalidProtocolBufferException {
2151      return PARSER.parseFrom(data, extensionRegistry);
2152    }
2153    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom(byte[] data)
2154        throws com.google.protobuf.InvalidProtocolBufferException {
2155      return PARSER.parseFrom(data);
2156    }
2157    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom(
2158        byte[] data,
2159        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2160        throws com.google.protobuf.InvalidProtocolBufferException {
2161      return PARSER.parseFrom(data, extensionRegistry);
2162    }
2163    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom(java.io.InputStream input)
2164        throws java.io.IOException {
2165      return PARSER.parseFrom(input);
2166    }
2167    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom(
2168        java.io.InputStream input,
2169        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2170        throws java.io.IOException {
2171      return PARSER.parseFrom(input, extensionRegistry);
2172    }
2173    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseDelimitedFrom(java.io.InputStream input)
2174        throws java.io.IOException {
2175      return PARSER.parseDelimitedFrom(input);
2176    }
2177    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseDelimitedFrom(
2178        java.io.InputStream input,
2179        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2180        throws java.io.IOException {
2181      return PARSER.parseDelimitedFrom(input, extensionRegistry);
2182    }
2183    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom(
2184        com.google.protobuf.CodedInputStream input)
2185        throws java.io.IOException {
2186      return PARSER.parseFrom(input);
2187    }
2188    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom(
2189        com.google.protobuf.CodedInputStream input,
2190        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2191        throws java.io.IOException {
2192      return PARSER.parseFrom(input, extensionRegistry);
2193    }
2194
2195    public static Builder newBuilder() { return Builder.create(); }
2196    public Builder newBuilderForType() { return newBuilder(); }
2197    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection prototype) {
2198      return newBuilder().mergeFrom(prototype);
2199    }
2200    public Builder toBuilder() { return newBuilder(this); }
2201
2202    @java.lang.Override
2203    protected Builder newBuilderForType(
2204        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
2205      Builder builder = new Builder(parent);
2206      return builder;
2207    }
2208    /**
2209     * Protobuf type {@code hadoop.hdfs.fsimage.NameSystemSection}
2210     *
2211     * <pre>
2212     **
2213     * Name: NS_INFO
2214     * </pre>
2215     */
2216    public static final class Builder extends
2217        com.google.protobuf.GeneratedMessage.Builder<Builder>
2218       implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSectionOrBuilder {
2219      public static final com.google.protobuf.Descriptors.Descriptor
2220          getDescriptor() {
2221        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_NameSystemSection_descriptor;
2222      }
2223
2224      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
2225          internalGetFieldAccessorTable() {
2226        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_NameSystemSection_fieldAccessorTable
2227            .ensureFieldAccessorsInitialized(
2228                org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection.Builder.class);
2229      }
2230
2231      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection.newBuilder()
2232      private Builder() {
2233        maybeForceBuilderInitialization();
2234      }
2235
2236      private Builder(
2237          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
2238        super(parent);
2239        maybeForceBuilderInitialization();
2240      }
2241      private void maybeForceBuilderInitialization() {
2242        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
2243        }
2244      }
2245      private static Builder create() {
2246        return new Builder();
2247      }
2248
2249      public Builder clear() {
2250        super.clear();
2251        namespaceId_ = 0;
2252        bitField0_ = (bitField0_ & ~0x00000001);
2253        genstampV1_ = 0L;
2254        bitField0_ = (bitField0_ & ~0x00000002);
2255        genstampV2_ = 0L;
2256        bitField0_ = (bitField0_ & ~0x00000004);
2257        genstampV1Limit_ = 0L;
2258        bitField0_ = (bitField0_ & ~0x00000008);
2259        lastAllocatedBlockId_ = 0L;
2260        bitField0_ = (bitField0_ & ~0x00000010);
2261        transactionId_ = 0L;
2262        bitField0_ = (bitField0_ & ~0x00000020);
2263        rollingUpgradeStartTime_ = 0L;
2264        bitField0_ = (bitField0_ & ~0x00000040);
2265        return this;
2266      }
2267
2268      public Builder clone() {
2269        return create().mergeFrom(buildPartial());
2270      }
2271
2272      public com.google.protobuf.Descriptors.Descriptor
2273          getDescriptorForType() {
2274        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_NameSystemSection_descriptor;
2275      }
2276
2277      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection getDefaultInstanceForType() {
2278        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection.getDefaultInstance();
2279      }
2280
2281      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection build() {
2282        org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection result = buildPartial();
2283        if (!result.isInitialized()) {
2284          throw newUninitializedMessageException(result);
2285        }
2286        return result;
2287      }
2288
2289      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection buildPartial() {
2290        org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection(this);
2291        int from_bitField0_ = bitField0_;
2292        int to_bitField0_ = 0;
2293        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
2294          to_bitField0_ |= 0x00000001;
2295        }
2296        result.namespaceId_ = namespaceId_;
2297        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
2298          to_bitField0_ |= 0x00000002;
2299        }
2300        result.genstampV1_ = genstampV1_;
2301        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
2302          to_bitField0_ |= 0x00000004;
2303        }
2304        result.genstampV2_ = genstampV2_;
2305        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
2306          to_bitField0_ |= 0x00000008;
2307        }
2308        result.genstampV1Limit_ = genstampV1Limit_;
2309        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
2310          to_bitField0_ |= 0x00000010;
2311        }
2312        result.lastAllocatedBlockId_ = lastAllocatedBlockId_;
2313        if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
2314          to_bitField0_ |= 0x00000020;
2315        }
2316        result.transactionId_ = transactionId_;
2317        if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
2318          to_bitField0_ |= 0x00000040;
2319        }
2320        result.rollingUpgradeStartTime_ = rollingUpgradeStartTime_;
2321        result.bitField0_ = to_bitField0_;
2322        onBuilt();
2323        return result;
2324      }
2325
2326      public Builder mergeFrom(com.google.protobuf.Message other) {
2327        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection) {
2328          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection)other);
2329        } else {
2330          super.mergeFrom(other);
2331          return this;
2332        }
2333      }
2334
2335      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection other) {
2336        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection.getDefaultInstance()) return this;
2337        if (other.hasNamespaceId()) {
2338          setNamespaceId(other.getNamespaceId());
2339        }
2340        if (other.hasGenstampV1()) {
2341          setGenstampV1(other.getGenstampV1());
2342        }
2343        if (other.hasGenstampV2()) {
2344          setGenstampV2(other.getGenstampV2());
2345        }
2346        if (other.hasGenstampV1Limit()) {
2347          setGenstampV1Limit(other.getGenstampV1Limit());
2348        }
2349        if (other.hasLastAllocatedBlockId()) {
2350          setLastAllocatedBlockId(other.getLastAllocatedBlockId());
2351        }
2352        if (other.hasTransactionId()) {
2353          setTransactionId(other.getTransactionId());
2354        }
2355        if (other.hasRollingUpgradeStartTime()) {
2356          setRollingUpgradeStartTime(other.getRollingUpgradeStartTime());
2357        }
2358        this.mergeUnknownFields(other.getUnknownFields());
2359        return this;
2360      }
2361
2362      public final boolean isInitialized() {
2363        return true;
2364      }
2365
2366      public Builder mergeFrom(
2367          com.google.protobuf.CodedInputStream input,
2368          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2369          throws java.io.IOException {
2370        org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parsedMessage = null;
2371        try {
2372          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
2373        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
2374          parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection) e.getUnfinishedMessage();
2375          throw e;
2376        } finally {
2377          if (parsedMessage != null) {
2378            mergeFrom(parsedMessage);
2379          }
2380        }
2381        return this;
2382      }
2383      private int bitField0_;
2384
2385      // optional uint32 namespaceId = 1;
      private int namespaceId_;
2387      /**
2388       * <code>optional uint32 namespaceId = 1;</code>
2389       */
2390      public boolean hasNamespaceId() {
2391        return ((bitField0_ & 0x00000001) == 0x00000001);
2392      }
2393      /**
2394       * <code>optional uint32 namespaceId = 1;</code>
2395       */
2396      public int getNamespaceId() {
2397        return namespaceId_;
2398      }
2399      /**
2400       * <code>optional uint32 namespaceId = 1;</code>
2401       */
2402      public Builder setNamespaceId(int value) {
2403        bitField0_ |= 0x00000001;
2404        namespaceId_ = value;
2405        onChanged();
2406        return this;
2407      }
2408      /**
2409       * <code>optional uint32 namespaceId = 1;</code>
2410       */
2411      public Builder clearNamespaceId() {
2412        bitField0_ = (bitField0_ & ~0x00000001);
2413        namespaceId_ = 0;
2414        onChanged();
2415        return this;
2416      }
2417
2418      // optional uint64 genstampV1 = 2;
      private long genstampV1_;
2420      /**
2421       * <code>optional uint64 genstampV1 = 2;</code>
2422       */
2423      public boolean hasGenstampV1() {
2424        return ((bitField0_ & 0x00000002) == 0x00000002);
2425      }
2426      /**
2427       * <code>optional uint64 genstampV1 = 2;</code>
2428       */
2429      public long getGenstampV1() {
2430        return genstampV1_;
2431      }
2432      /**
2433       * <code>optional uint64 genstampV1 = 2;</code>
2434       */
2435      public Builder setGenstampV1(long value) {
2436        bitField0_ |= 0x00000002;
2437        genstampV1_ = value;
2438        onChanged();
2439        return this;
2440      }
2441      /**
2442       * <code>optional uint64 genstampV1 = 2;</code>
2443       */
2444      public Builder clearGenstampV1() {
2445        bitField0_ = (bitField0_ & ~0x00000002);
2446        genstampV1_ = 0L;
2447        onChanged();
2448        return this;
2449      }
2450
2451      // optional uint64 genstampV2 = 3;
      private long genstampV2_;
2453      /**
2454       * <code>optional uint64 genstampV2 = 3;</code>
2455       */
2456      public boolean hasGenstampV2() {
2457        return ((bitField0_ & 0x00000004) == 0x00000004);
2458      }
2459      /**
2460       * <code>optional uint64 genstampV2 = 3;</code>
2461       */
2462      public long getGenstampV2() {
2463        return genstampV2_;
2464      }
2465      /**
2466       * <code>optional uint64 genstampV2 = 3;</code>
2467       */
2468      public Builder setGenstampV2(long value) {
2469        bitField0_ |= 0x00000004;
2470        genstampV2_ = value;
2471        onChanged();
2472        return this;
2473      }
2474      /**
2475       * <code>optional uint64 genstampV2 = 3;</code>
2476       */
2477      public Builder clearGenstampV2() {
2478        bitField0_ = (bitField0_ & ~0x00000004);
2479        genstampV2_ = 0L;
2480        onChanged();
2481        return this;
2482      }
2483
2484      // optional uint64 genstampV1Limit = 4;
      private long genstampV1Limit_;
2486      /**
2487       * <code>optional uint64 genstampV1Limit = 4;</code>
2488       */
2489      public boolean hasGenstampV1Limit() {
2490        return ((bitField0_ & 0x00000008) == 0x00000008);
2491      }
2492      /**
2493       * <code>optional uint64 genstampV1Limit = 4;</code>
2494       */
2495      public long getGenstampV1Limit() {
2496        return genstampV1Limit_;
2497      }
2498      /**
2499       * <code>optional uint64 genstampV1Limit = 4;</code>
2500       */
2501      public Builder setGenstampV1Limit(long value) {
2502        bitField0_ |= 0x00000008;
2503        genstampV1Limit_ = value;
2504        onChanged();
2505        return this;
2506      }
2507      /**
2508       * <code>optional uint64 genstampV1Limit = 4;</code>
2509       */
2510      public Builder clearGenstampV1Limit() {
2511        bitField0_ = (bitField0_ & ~0x00000008);
2512        genstampV1Limit_ = 0L;
2513        onChanged();
2514        return this;
2515      }
2516
2517      // optional uint64 lastAllocatedBlockId = 5;
      private long lastAllocatedBlockId_;
2519      /**
2520       * <code>optional uint64 lastAllocatedBlockId = 5;</code>
2521       */
2522      public boolean hasLastAllocatedBlockId() {
2523        return ((bitField0_ & 0x00000010) == 0x00000010);
2524      }
2525      /**
2526       * <code>optional uint64 lastAllocatedBlockId = 5;</code>
2527       */
2528      public long getLastAllocatedBlockId() {
2529        return lastAllocatedBlockId_;
2530      }
2531      /**
2532       * <code>optional uint64 lastAllocatedBlockId = 5;</code>
2533       */
2534      public Builder setLastAllocatedBlockId(long value) {
2535        bitField0_ |= 0x00000010;
2536        lastAllocatedBlockId_ = value;
2537        onChanged();
2538        return this;
2539      }
2540      /**
2541       * <code>optional uint64 lastAllocatedBlockId = 5;</code>
2542       */
2543      public Builder clearLastAllocatedBlockId() {
2544        bitField0_ = (bitField0_ & ~0x00000010);
2545        lastAllocatedBlockId_ = 0L;
2546        onChanged();
2547        return this;
2548      }
2549
2550      // optional uint64 transactionId = 6;
      private long transactionId_;
2552      /**
2553       * <code>optional uint64 transactionId = 6;</code>
2554       */
2555      public boolean hasTransactionId() {
2556        return ((bitField0_ & 0x00000020) == 0x00000020);
2557      }
2558      /**
2559       * <code>optional uint64 transactionId = 6;</code>
2560       */
2561      public long getTransactionId() {
2562        return transactionId_;
2563      }
2564      /**
2565       * <code>optional uint64 transactionId = 6;</code>
2566       */
2567      public Builder setTransactionId(long value) {
2568        bitField0_ |= 0x00000020;
2569        transactionId_ = value;
2570        onChanged();
2571        return this;
2572      }
2573      /**
2574       * <code>optional uint64 transactionId = 6;</code>
2575       */
2576      public Builder clearTransactionId() {
2577        bitField0_ = (bitField0_ & ~0x00000020);
2578        transactionId_ = 0L;
2579        onChanged();
2580        return this;
2581      }
2582
2583      // optional uint64 rollingUpgradeStartTime = 7;
      private long rollingUpgradeStartTime_;
2585      /**
2586       * <code>optional uint64 rollingUpgradeStartTime = 7;</code>
2587       */
2588      public boolean hasRollingUpgradeStartTime() {
2589        return ((bitField0_ & 0x00000040) == 0x00000040);
2590      }
2591      /**
2592       * <code>optional uint64 rollingUpgradeStartTime = 7;</code>
2593       */
2594      public long getRollingUpgradeStartTime() {
2595        return rollingUpgradeStartTime_;
2596      }
2597      /**
2598       * <code>optional uint64 rollingUpgradeStartTime = 7;</code>
2599       */
2600      public Builder setRollingUpgradeStartTime(long value) {
2601        bitField0_ |= 0x00000040;
2602        rollingUpgradeStartTime_ = value;
2603        onChanged();
2604        return this;
2605      }
2606      /**
2607       * <code>optional uint64 rollingUpgradeStartTime = 7;</code>
2608       */
2609      public Builder clearRollingUpgradeStartTime() {
2610        bitField0_ = (bitField0_ & ~0x00000040);
2611        rollingUpgradeStartTime_ = 0L;
2612        onChanged();
2613        return this;
2614      }
2615
2616      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.NameSystemSection)
2617    }
2618
2619    static {
2620      defaultInstance = new NameSystemSection(true);
2621      defaultInstance.initFields();
2622    }
2623
2624    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.NameSystemSection)
2625  }
2626
2627  public interface INodeSectionOrBuilder
2628      extends com.google.protobuf.MessageOrBuilder {
2629
2630    // optional uint64 lastInodeId = 1;
2631    /**
2632     * <code>optional uint64 lastInodeId = 1;</code>
2633     */
2634    boolean hasLastInodeId();
2635    /**
2636     * <code>optional uint64 lastInodeId = 1;</code>
2637     */
2638    long getLastInodeId();
2639
2640    // optional uint64 numInodes = 2;
2641    /**
2642     * <code>optional uint64 numInodes = 2;</code>
2643     *
2644     * <pre>
     * The number of INode entries (repeated INodes) that follow.
2646     * </pre>
2647     */
2648    boolean hasNumInodes();
2649    /**
2650     * <code>optional uint64 numInodes = 2;</code>
2651     *
2652     * <pre>
     * The number of INode entries (repeated INodes) that follow.
2654     * </pre>
2655     */
2656    long getNumInodes();
2657  }
2658  /**
2659   * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection}
2660   *
2661   * <pre>
2662   **
2663   * Permission is serialized as a 64-bit long. [0:24):[25:48):[48:64) (in Big Endian).
2664   * The first and the second parts are the string ids of the user and
2665   * group name, and the last 16 bits are the permission bits.
2666   *
2667   * Name: INODE
2668   * </pre>
2669   */
2670  public static final class INodeSection extends
2671      com.google.protobuf.GeneratedMessage
2672      implements INodeSectionOrBuilder {
2673    // Use INodeSection.newBuilder() to construct.
2674    private INodeSection(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
2675      super(builder);
2676      this.unknownFields = builder.getUnknownFields();
2677    }
2678    private INodeSection(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
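
    /*
     * Worked example of the permission encoding described in the class
     * comment above (illustrative, not part of the generated code): with a
     * 24-bit user string id, a 24-bit group string id, and 16 permission
     * bits packed big-endian into one long,
     *
     *   long perm = ((long) userId << 40) | ((long) groupId << 16) | mode;
     *
     * so userId = 5, groupId = 7, mode = 0644 (octal, i.e. 0x1A4) gives
     * (5L << 40) | (7L << 16) | 0x1A4 = 0x00000500000701A4.
     */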
2679
2680    private static final INodeSection defaultInstance;
2681    public static INodeSection getDefaultInstance() {
2682      return defaultInstance;
2683    }
2684
2685    public INodeSection getDefaultInstanceForType() {
2686      return defaultInstance;
2687    }
2688
2689    private final com.google.protobuf.UnknownFieldSet unknownFields;
2690    @java.lang.Override
2691    public final com.google.protobuf.UnknownFieldSet
2692        getUnknownFields() {
2693      return this.unknownFields;
2694    }
2695    private INodeSection(
2696        com.google.protobuf.CodedInputStream input,
2697        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2698        throws com.google.protobuf.InvalidProtocolBufferException {
2699      initFields();
2700      int mutable_bitField0_ = 0;
2701      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
2702          com.google.protobuf.UnknownFieldSet.newBuilder();
2703      try {
2704        boolean done = false;
2705        while (!done) {
2706          int tag = input.readTag();
2707          switch (tag) {
2708            case 0:
2709              done = true;
2710              break;
            case 8: {
              bitField0_ |= 0x00000001;
              lastInodeId_ = input.readUInt64();
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              numInodes_ = input.readUInt64();
              break;
            }
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
2728          }
2729        }
2730      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
2731        throw e.setUnfinishedMessage(this);
2732      } catch (java.io.IOException e) {
2733        throw new com.google.protobuf.InvalidProtocolBufferException(
2734            e.getMessage()).setUnfinishedMessage(this);
2735      } finally {
2736        this.unknownFields = unknownFields.build();
2737        makeExtensionsImmutable();
2738      }
2739    }
2740    public static final com.google.protobuf.Descriptors.Descriptor
2741        getDescriptor() {
2742      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor;
2743    }
2744
2745    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
2746        internalGetFieldAccessorTable() {
2747      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_fieldAccessorTable
2748          .ensureFieldAccessorsInitialized(
2749              org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.Builder.class);
2750    }
2751
2752    public static com.google.protobuf.Parser<INodeSection> PARSER =
2753        new com.google.protobuf.AbstractParser<INodeSection>() {
2754      public INodeSection parsePartialFrom(
2755          com.google.protobuf.CodedInputStream input,
2756          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2757          throws com.google.protobuf.InvalidProtocolBufferException {
2758        return new INodeSection(input, extensionRegistry);
2759      }
2760    };
2761
2762    @java.lang.Override
2763    public com.google.protobuf.Parser<INodeSection> getParserForType() {
2764      return PARSER;
2765    }
2766
2767    public interface FileUnderConstructionFeatureOrBuilder
2768        extends com.google.protobuf.MessageOrBuilder {
2769
2770      // optional string clientName = 1;
2771      /**
2772       * <code>optional string clientName = 1;</code>
2773       */
2774      boolean hasClientName();
2775      /**
2776       * <code>optional string clientName = 1;</code>
2777       */
2778      java.lang.String getClientName();
2779      /**
2780       * <code>optional string clientName = 1;</code>
2781       */
2782      com.google.protobuf.ByteString
2783          getClientNameBytes();
2784
2785      // optional string clientMachine = 2;
2786      /**
2787       * <code>optional string clientMachine = 2;</code>
2788       */
2789      boolean hasClientMachine();
2790      /**
2791       * <code>optional string clientMachine = 2;</code>
2792       */
2793      java.lang.String getClientMachine();
2794      /**
2795       * <code>optional string clientMachine = 2;</code>
2796       */
2797      com.google.protobuf.ByteString
2798          getClientMachineBytes();
2799    }
2800    /**
2801     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature}
2802     *
2803     * <pre>
2804     **
2805     * under-construction feature for INodeFile
2806     * </pre>
2807     */
2808    public static final class FileUnderConstructionFeature extends
2809        com.google.protobuf.GeneratedMessage
2810        implements FileUnderConstructionFeatureOrBuilder {
2811      // Use FileUnderConstructionFeature.newBuilder() to construct.
2812      private FileUnderConstructionFeature(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
2813        super(builder);
2814        this.unknownFields = builder.getUnknownFields();
2815      }
2816      private FileUnderConstructionFeature(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
2817
2818      private static final FileUnderConstructionFeature defaultInstance;
2819      public static FileUnderConstructionFeature getDefaultInstance() {
2820        return defaultInstance;
2821      }
2822
2823      public FileUnderConstructionFeature getDefaultInstanceForType() {
2824        return defaultInstance;
2825      }
2826
2827      private final com.google.protobuf.UnknownFieldSet unknownFields;
2828      @java.lang.Override
2829      public final com.google.protobuf.UnknownFieldSet
2830          getUnknownFields() {
2831        return this.unknownFields;
2832      }
2833      private FileUnderConstructionFeature(
2834          com.google.protobuf.CodedInputStream input,
2835          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2836          throws com.google.protobuf.InvalidProtocolBufferException {
2837        initFields();
2838        int mutable_bitField0_ = 0;
2839        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
2840            com.google.protobuf.UnknownFieldSet.newBuilder();
2841        try {
2842          boolean done = false;
2843          while (!done) {
2844            int tag = input.readTag();
2845            switch (tag) {
2846              case 0:
2847                done = true;
2848                break;
              case 10: {
                bitField0_ |= 0x00000001;
                clientName_ = input.readBytes();
                break;
              }
              case 18: {
                bitField0_ |= 0x00000002;
                clientMachine_ = input.readBytes();
                break;
              }
              default: {
                if (!parseUnknownField(input, unknownFields,
                                       extensionRegistry, tag)) {
                  done = true;
                }
                break;
              }
2866            }
2867          }
2868        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
2869          throw e.setUnfinishedMessage(this);
2870        } catch (java.io.IOException e) {
2871          throw new com.google.protobuf.InvalidProtocolBufferException(
2872              e.getMessage()).setUnfinishedMessage(this);
2873        } finally {
2874          this.unknownFields = unknownFields.build();
2875          makeExtensionsImmutable();
2876        }
2877      }
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder.class);
      }

      public static com.google.protobuf.Parser<FileUnderConstructionFeature> PARSER =
          new com.google.protobuf.AbstractParser<FileUnderConstructionFeature>() {
        public FileUnderConstructionFeature parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          return new FileUnderConstructionFeature(input, extensionRegistry);
        }
      };

      @java.lang.Override
      public com.google.protobuf.Parser<FileUnderConstructionFeature> getParserForType() {
        return PARSER;
      }

      private int bitField0_;
      // optional string clientName = 1;
      public static final int CLIENTNAME_FIELD_NUMBER = 1;
      private java.lang.Object clientName_;
      /**
       * <code>optional string clientName = 1;</code>
       */
      public boolean hasClientName() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>optional string clientName = 1;</code>
       */
      public java.lang.String getClientName() {
        java.lang.Object ref = clientName_;
        if (ref instanceof java.lang.String) {
          return (java.lang.String) ref;
        } else {
          com.google.protobuf.ByteString bs =
              (com.google.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            clientName_ = s;
          }
          return s;
        }
      }
      /**
       * <code>optional string clientName = 1;</code>
       */
      public com.google.protobuf.ByteString
          getClientNameBytes() {
        java.lang.Object ref = clientName_;
        if (ref instanceof java.lang.String) {
          com.google.protobuf.ByteString b =
              com.google.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          clientName_ = b;
          return b;
        } else {
          return (com.google.protobuf.ByteString) ref;
        }
      }
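      // Both accessors above memoize: the field is stored as either a
      // java.lang.String or a ByteString, and each getter swaps in the
      // representation it produced (the String is cached only when the bytes
      // are valid UTF-8). A hedged sketch of the observable effect:
      //
      //   FileUnderConstructionFeature f = ...;  // parsed from the wire
      //   String s1 = f.getClientName();  // decodes UTF-8, caches the String
      //   String s2 = f.getClientName();  // returns the cached instance
      //   // s1 == s2 once the first decode succeeded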

      // optional string clientMachine = 2;
      public static final int CLIENTMACHINE_FIELD_NUMBER = 2;
      private java.lang.Object clientMachine_;
      /**
       * <code>optional string clientMachine = 2;</code>
       */
      public boolean hasClientMachine() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>optional string clientMachine = 2;</code>
       */
      public java.lang.String getClientMachine() {
        java.lang.Object ref = clientMachine_;
        if (ref instanceof java.lang.String) {
          return (java.lang.String) ref;
        } else {
          com.google.protobuf.ByteString bs =
              (com.google.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            clientMachine_ = s;
          }
          return s;
        }
      }
      /**
       * <code>optional string clientMachine = 2;</code>
       */
      public com.google.protobuf.ByteString
          getClientMachineBytes() {
        java.lang.Object ref = clientMachine_;
        if (ref instanceof java.lang.String) {
          com.google.protobuf.ByteString b =
              com.google.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          clientMachine_ = b;
          return b;
        } else {
          return (com.google.protobuf.ByteString) ref;
        }
      }

      private void initFields() {
        clientName_ = "";
        clientMachine_ = "";
      }
      private byte memoizedIsInitialized = -1;
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized != -1) return isInitialized == 1;

        memoizedIsInitialized = 1;
        return true;
      }

      public void writeTo(com.google.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        getSerializedSize();
        if (((bitField0_ & 0x00000001) == 0x00000001)) {
          output.writeBytes(1, getClientNameBytes());
        }
        if (((bitField0_ & 0x00000002) == 0x00000002)) {
          output.writeBytes(2, getClientMachineBytes());
        }
        getUnknownFields().writeTo(output);
      }

      private int memoizedSerializedSize = -1;
      public int getSerializedSize() {
        int size = memoizedSerializedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) == 0x00000001)) {
          size += com.google.protobuf.CodedOutputStream
            .computeBytesSize(1, getClientNameBytes());
        }
        if (((bitField0_ & 0x00000002) == 0x00000002)) {
          size += com.google.protobuf.CodedOutputStream
            .computeBytesSize(2, getClientMachineBytes());
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSerializedSize = size;
        return size;
      }

      private static final long serialVersionUID = 0L;
      @java.lang.Override
      protected java.lang.Object writeReplace()
          throws java.io.ObjectStreamException {
        return super.writeReplace();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom(
          com.google.protobuf.ByteString data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom(
          com.google.protobuf.ByteString data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom(byte[] data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom(
          byte[] data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return PARSER.parseFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseFrom(input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return PARSER.parseDelimitedFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseDelimitedFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseDelimitedFrom(input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom(
          com.google.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return PARSER.parseFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseFrom(input, extensionRegistry);
      }
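      // A minimal usage sketch for the parseFrom() overloads above (the local
      // variable names are illustrative only):
      //
      //   byte[] data = ...;  // a serialized FileUnderConstructionFeature
      //   FileUnderConstructionFeature feature =
      //       FileUnderConstructionFeature.parseFrom(data);
      //   if (feature.hasClientName()) {
      //     System.out.println(feature.getClientName());
      //   }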

      public static Builder newBuilder() { return Builder.create(); }
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature prototype) {
        return newBuilder().mergeFrom(prototype);
      }
      public Builder toBuilder() { return newBuilder(this); }
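      // The builder entry points above support the usual construct/copy
      // pattern; a hedged sketch (the field values are made up for
      // illustration):
      //
      //   FileUnderConstructionFeature f = FileUnderConstructionFeature
      //       .newBuilder()
      //       .setClientName("DFSClient_123")
      //       .setClientMachine("host.example.com")
      //       .build();
      //   FileUnderConstructionFeature copy = f.toBuilder().build();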

      @java.lang.Override
      protected Builder newBuilderForType(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature}
       *
       * <pre>
       **
       * under-construction feature for INodeFile
       * </pre>
       */
      public static final class Builder extends
          com.google.protobuf.GeneratedMessage.Builder<Builder>
         implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeatureOrBuilder {
        public static final com.google.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_descriptor;
        }

        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.newBuilder()
        private Builder() {
          maybeForceBuilderInitialization();
        }

        private Builder(
            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
          super(parent);
          maybeForceBuilderInitialization();
        }
        private void maybeForceBuilderInitialization() {
          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          }
        }
        private static Builder create() {
          return new Builder();
        }

        public Builder clear() {
          super.clear();
          clientName_ = "";
          bitField0_ = (bitField0_ & ~0x00000001);
          clientMachine_ = "";
          bitField0_ = (bitField0_ & ~0x00000002);
          return this;
        }

        public Builder clone() {
          return create().mergeFrom(buildPartial());
        }

        public com.google.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_descriptor;
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.getDefaultInstance();
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature(this);
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
            to_bitField0_ |= 0x00000001;
          }
          result.clientName_ = clientName_;
          if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
            to_bitField0_ |= 0x00000002;
          }
          result.clientMachine_ = clientMachine_;
          result.bitField0_ = to_bitField0_;
          onBuilt();
          return result;
        }

        public Builder mergeFrom(com.google.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.getDefaultInstance()) return this;
          if (other.hasClientName()) {
            bitField0_ |= 0x00000001;
            clientName_ = other.clientName_;
            onChanged();
          }
          if (other.hasClientMachine()) {
            bitField0_ |= 0x00000002;
            clientMachine_ = other.clientMachine_;
            onChanged();
          }
          this.mergeUnknownFields(other.getUnknownFields());
          return this;
        }

        public final boolean isInitialized() {
          return true;
        }

        public Builder mergeFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parsedMessage = null;
          try {
            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature) e.getUnfinishedMessage();
            throw e;
          } finally {
            if (parsedMessage != null) {
              mergeFrom(parsedMessage);
            }
          }
          return this;
        }
        private int bitField0_;

        // optional string clientName = 1;
        private java.lang.Object clientName_ = "";
        /**
         * <code>optional string clientName = 1;</code>
         */
        public boolean hasClientName() {
          return ((bitField0_ & 0x00000001) == 0x00000001);
        }
        /**
         * <code>optional string clientName = 1;</code>
         */
        public java.lang.String getClientName() {
          java.lang.Object ref = clientName_;
          if (!(ref instanceof java.lang.String)) {
            java.lang.String s = ((com.google.protobuf.ByteString) ref)
                .toStringUtf8();
            clientName_ = s;
            return s;
          } else {
            return (java.lang.String) ref;
          }
        }
        /**
         * <code>optional string clientName = 1;</code>
         */
        public com.google.protobuf.ByteString
            getClientNameBytes() {
          java.lang.Object ref = clientName_;
          if (ref instanceof String) {
            com.google.protobuf.ByteString b =
                com.google.protobuf.ByteString.copyFromUtf8(
                    (java.lang.String) ref);
            clientName_ = b;
            return b;
          } else {
            return (com.google.protobuf.ByteString) ref;
          }
        }
        /**
         * <code>optional string clientName = 1;</code>
         */
        public Builder setClientName(
            java.lang.String value) {
          if (value == null) {
            throw new NullPointerException();
          }
          bitField0_ |= 0x00000001;
          clientName_ = value;
          onChanged();
          return this;
        }
        /**
         * <code>optional string clientName = 1;</code>
         */
        public Builder clearClientName() {
          bitField0_ = (bitField0_ & ~0x00000001);
          clientName_ = getDefaultInstance().getClientName();
          onChanged();
          return this;
        }
        /**
         * <code>optional string clientName = 1;</code>
         */
        public Builder setClientNameBytes(
            com.google.protobuf.ByteString value) {
          if (value == null) {
            throw new NullPointerException();
          }
          bitField0_ |= 0x00000001;
          clientName_ = value;
          onChanged();
          return this;
        }

        // optional string clientMachine = 2;
        private java.lang.Object clientMachine_ = "";
        /**
         * <code>optional string clientMachine = 2;</code>
         */
        public boolean hasClientMachine() {
          return ((bitField0_ & 0x00000002) == 0x00000002);
        }
        /**
         * <code>optional string clientMachine = 2;</code>
         */
        public java.lang.String getClientMachine() {
          java.lang.Object ref = clientMachine_;
          if (!(ref instanceof java.lang.String)) {
            java.lang.String s = ((com.google.protobuf.ByteString) ref)
                .toStringUtf8();
            clientMachine_ = s;
            return s;
          } else {
            return (java.lang.String) ref;
          }
        }
        /**
         * <code>optional string clientMachine = 2;</code>
         */
        public com.google.protobuf.ByteString
            getClientMachineBytes() {
          java.lang.Object ref = clientMachine_;
          if (ref instanceof String) {
            com.google.protobuf.ByteString b =
                com.google.protobuf.ByteString.copyFromUtf8(
                    (java.lang.String) ref);
            clientMachine_ = b;
            return b;
          } else {
            return (com.google.protobuf.ByteString) ref;
          }
        }
        /**
         * <code>optional string clientMachine = 2;</code>
         */
        public Builder setClientMachine(
            java.lang.String value) {
          if (value == null) {
            throw new NullPointerException();
          }
          bitField0_ |= 0x00000002;
          clientMachine_ = value;
          onChanged();
          return this;
        }
        /**
         * <code>optional string clientMachine = 2;</code>
         */
        public Builder clearClientMachine() {
          bitField0_ = (bitField0_ & ~0x00000002);
          clientMachine_ = getDefaultInstance().getClientMachine();
          onChanged();
          return this;
        }
        /**
         * <code>optional string clientMachine = 2;</code>
         */
        public Builder setClientMachineBytes(
            com.google.protobuf.ByteString value) {
          if (value == null) {
            throw new NullPointerException();
          }
          bitField0_ |= 0x00000002;
          clientMachine_ = value;
          onChanged();
          return this;
        }

        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature)
      }

      static {
        defaultInstance = new FileUnderConstructionFeature(true);
        defaultInstance.initFields();
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature)
    }

    public interface AclFeatureProtoOrBuilder
        extends com.google.protobuf.MessageOrBuilder {

      // repeated fixed32 entries = 2 [packed = true];
      /**
       * <code>repeated fixed32 entries = 2 [packed = true];</code>
       *
       * <pre>
       **
       * An ACL entry is represented by a 32-bit integer in big-endian
       * format. The bits can be divided into five segments:
       * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
       *
       * [0:2) -- reserved for future uses.
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- the scope of the entry (AclEntryScopeProto)
       * [27:29) -- the type of the entry (AclEntryTypeProto)
       * [29:32) -- the permission of the entry (FsActionProto)
       * </pre>
       */
      java.util.List<java.lang.Integer> getEntriesList();
      /**
       * <code>repeated fixed32 entries = 2 [packed = true];</code>
       *
       * <pre>
       **
       * An ACL entry is represented by a 32-bit integer in big-endian
       * format. The bits can be divided into five segments:
       * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
       *
       * [0:2) -- reserved for future uses.
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- the scope of the entry (AclEntryScopeProto)
       * [27:29) -- the type of the entry (AclEntryTypeProto)
       * [29:32) -- the permission of the entry (FsActionProto)
       * </pre>
       */
      int getEntriesCount();
      /**
       * <code>repeated fixed32 entries = 2 [packed = true];</code>
       *
       * <pre>
       **
       * An ACL entry is represented by a 32-bit integer in big-endian
       * format. The bits can be divided into five segments:
       * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
       *
       * [0:2) -- reserved for future uses.
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- the scope of the entry (AclEntryScopeProto)
       * [27:29) -- the type of the entry (AclEntryTypeProto)
       * [29:32) -- the permission of the entry (FsActionProto)
       * </pre>
       */
      int getEntries(int index);
    }
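    // Decoding an ACL entry per the bit layout documented above is a matter of
    // shifts and masks. A minimal sketch, assuming the segment boundaries in
    // the comment (with bit 0 the most significant bit); these helpers are
    // illustrative and not part of the generated API:
    //
    //   static int aclPermission(int entry) { return entry & 0x7; }             // [29:32)
    //   static int aclType(int entry)       { return (entry >>> 3) & 0x3; }     // [27:29)
    //   static int aclScope(int entry)      { return (entry >>> 5) & 0x1; }     // [26:27)
    //   static int aclNameId(int entry)     { return (entry >>> 6) & 0xFFFFFF; } // [2:26)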
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.AclFeatureProto}
     */
    public static final class AclFeatureProto extends
        com.google.protobuf.GeneratedMessage
        implements AclFeatureProtoOrBuilder {
      // Use AclFeatureProto.newBuilder() to construct.
      private AclFeatureProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
        super(builder);
        this.unknownFields = builder.getUnknownFields();
      }
      private AclFeatureProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

      private static final AclFeatureProto defaultInstance;
      public static AclFeatureProto getDefaultInstance() {
        return defaultInstance;
      }

      public AclFeatureProto getDefaultInstanceForType() {
        return defaultInstance;
      }

      private final com.google.protobuf.UnknownFieldSet unknownFields;
      @java.lang.Override
      public final com.google.protobuf.UnknownFieldSet
          getUnknownFields() {
        return this.unknownFields;
      }
      private AclFeatureProto(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        initFields();
        int mutable_bitField0_ = 0;
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
            com.google.protobuf.UnknownFieldSet.newBuilder();
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!parseUnknownField(input, unknownFields,
                                       extensionRegistry, tag)) {
                  done = true;
                }
                break;
              }
              case 21: {
                if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
                  entries_ = new java.util.ArrayList<java.lang.Integer>();
                  mutable_bitField0_ |= 0x00000001;
                }
                entries_.add(input.readFixed32());
                break;
              }
              case 18: {
                int length = input.readRawVarint32();
                int limit = input.pushLimit(length);
                if (!((mutable_bitField0_ & 0x00000001) == 0x00000001) && input.getBytesUntilLimit() > 0) {
                  entries_ = new java.util.ArrayList<java.lang.Integer>();
                  mutable_bitField0_ |= 0x00000001;
                }
                while (input.getBytesUntilLimit() > 0) {
                  entries_.add(input.readFixed32());
                }
                input.popLimit(limit);
                break;
              }
            }
          }
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(this);
        } catch (java.io.IOException e) {
          throw new com.google.protobuf.InvalidProtocolBufferException(
              e.getMessage()).setUnfinishedMessage(this);
        } finally {
          if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
            entries_ = java.util.Collections.unmodifiableList(entries_);
          }
          this.unknownFields = unknownFields.build();
          makeExtensionsImmutable();
        }
      }
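      // Note the two tags handled above for the same field: case 18 is field 2
      // with wire type 2 (the packed encoding: a byte length, then raw fixed32
      // values), while case 21 is field 2 with wire type 5 (a single unpacked
      // fixed32). Accepting both keeps the field compatible with writers that
      // predate [packed = true]. The packed branch mirrors this sketch:
      //
      //   int length = input.readRawVarint32();   // payload length in bytes
      //   int limit = input.pushLimit(length);    // confine reads to payload
      //   while (input.getBytesUntilLimit() > 0) {
      //     entries.add(input.readFixed32());     // 4 bytes per entry
      //   }
      //   input.popLimit(limit);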
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder.class);
      }

      public static com.google.protobuf.Parser<AclFeatureProto> PARSER =
          new com.google.protobuf.AbstractParser<AclFeatureProto>() {
        public AclFeatureProto parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          return new AclFeatureProto(input, extensionRegistry);
        }
      };

      @java.lang.Override
      public com.google.protobuf.Parser<AclFeatureProto> getParserForType() {
        return PARSER;
      }

      // repeated fixed32 entries = 2 [packed = true];
      public static final int ENTRIES_FIELD_NUMBER = 2;
      private java.util.List<java.lang.Integer> entries_;
      /**
       * <code>repeated fixed32 entries = 2 [packed = true];</code>
       *
       * <pre>
       **
       * An ACL entry is represented by a 32-bit integer in big-endian
       * format. The bits can be divided into five segments:
       * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
       *
       * [0:2) -- reserved for future uses.
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- the scope of the entry (AclEntryScopeProto)
       * [27:29) -- the type of the entry (AclEntryTypeProto)
       * [29:32) -- the permission of the entry (FsActionProto)
       * </pre>
       */
      public java.util.List<java.lang.Integer>
          getEntriesList() {
        return entries_;
      }
      /**
       * <code>repeated fixed32 entries = 2 [packed = true];</code>
       *
       * <pre>
       **
       * An ACL entry is represented by a 32-bit integer in big-endian
       * format. The bits can be divided into five segments:
       * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
       *
       * [0:2) -- reserved for future uses.
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- the scope of the entry (AclEntryScopeProto)
       * [27:29) -- the type of the entry (AclEntryTypeProto)
       * [29:32) -- the permission of the entry (FsActionProto)
       * </pre>
       */
      public int getEntriesCount() {
        return entries_.size();
      }
      /**
       * <code>repeated fixed32 entries = 2 [packed = true];</code>
       *
       * <pre>
       **
       * An ACL entry is represented by a 32-bit integer in big-endian
       * format. The bits can be divided into five segments:
       * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
       *
       * [0:2) -- reserved for future uses.
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- the scope of the entry (AclEntryScopeProto)
       * [27:29) -- the type of the entry (AclEntryTypeProto)
       * [29:32) -- the permission of the entry (FsActionProto)
       * </pre>
       */
      public int getEntries(int index) {
        return entries_.get(index);
      }
      private int entriesMemoizedSerializedSize = -1;

      private void initFields() {
        entries_ = java.util.Collections.emptyList();
      }
      private byte memoizedIsInitialized = -1;
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized != -1) return isInitialized == 1;

        memoizedIsInitialized = 1;
        return true;
      }

      public void writeTo(com.google.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        getSerializedSize();
        if (getEntriesList().size() > 0) {
          output.writeRawVarint32(18);
          output.writeRawVarint32(entriesMemoizedSerializedSize);
        }
        for (int i = 0; i < entries_.size(); i++) {
          output.writeFixed32NoTag(entries_.get(i));
        }
        getUnknownFields().writeTo(output);
      }

      private int memoizedSerializedSize = -1;
      public int getSerializedSize() {
        int size = memoizedSerializedSize;
        if (size != -1) return size;

        size = 0;
        {
          int dataSize = 0;
          dataSize = 4 * getEntriesList().size();
          size += dataSize;
          if (!getEntriesList().isEmpty()) {
            size += 1;
            size += com.google.protobuf.CodedOutputStream
                .computeInt32SizeNoTag(dataSize);
          }
          entriesMemoizedSerializedSize = dataSize;
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSerializedSize = size;
        return size;
      }
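      // Worked example of the size computation above, assuming three entries:
      // dataSize = 4 * 3 = 12 bytes of fixed32 payload, plus 1 byte for the
      // tag (18, i.e. 0x12) and computeInt32SizeNoTag(12) = 1 byte for the
      // length varint, so this field contributes 14 bytes. This also explains
      // why writeTo() must call getSerializedSize() first: it populates
      // entriesMemoizedSerializedSize, which writeTo() emits as the length.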

      private static final long serialVersionUID = 0L;
      @java.lang.Override
      protected java.lang.Object writeReplace()
          throws java.io.ObjectStreamException {
        return super.writeReplace();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom(
          com.google.protobuf.ByteString data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom(
          com.google.protobuf.ByteString data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom(byte[] data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom(
          byte[] data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return PARSER.parseFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseFrom(input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return PARSER.parseDelimitedFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseDelimitedFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseDelimitedFrom(input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom(
          com.google.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return PARSER.parseFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseFrom(input, extensionRegistry);
      }

      public static Builder newBuilder() { return Builder.create(); }
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto prototype) {
        return newBuilder().mergeFrom(prototype);
      }
      public Builder toBuilder() { return newBuilder(this); }
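      // A hedged usage sketch for the builder defined below (the entry values
      // are illustrative placeholders; real values come from the ACL encoder):
      //
      //   AclFeatureProto acl = AclFeatureProto.newBuilder()
      //       .addEntries(0x00000107)
      //       .addEntries(0x00000047)
      //       .build();
      //   int first = acl.getEntries(0);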

      @java.lang.Override
      protected Builder newBuilderForType(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.AclFeatureProto}
       */
      public static final class Builder extends
          com.google.protobuf.GeneratedMessage.Builder<Builder>
         implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder {
        public static final com.google.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_descriptor;
        }

        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.newBuilder()
        private Builder() {
          maybeForceBuilderInitialization();
        }

        private Builder(
            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
          super(parent);
          maybeForceBuilderInitialization();
        }
        private void maybeForceBuilderInitialization() {
          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          }
        }
        private static Builder create() {
          return new Builder();
        }

        public Builder clear() {
          super.clear();
          entries_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
          return this;
        }

        public Builder clone() {
          return create().mergeFrom(buildPartial());
        }

        public com.google.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_descriptor;
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance();
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto(this);
          int from_bitField0_ = bitField0_;
          if (((bitField0_ & 0x00000001) == 0x00000001)) {
            entries_ = java.util.Collections.unmodifiableList(entries_);
            bitField0_ = (bitField0_ & ~0x00000001);
          }
          result.entries_ = entries_;
          onBuilt();
          return result;
        }

        public Builder mergeFrom(com.google.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance()) return this;
          if (!other.entries_.isEmpty()) {
            if (entries_.isEmpty()) {
              entries_ = other.entries_;
              bitField0_ = (bitField0_ & ~0x00000001);
            } else {
              ensureEntriesIsMutable();
              entries_.addAll(other.entries_);
            }
            onChanged();
          }
          this.mergeUnknownFields(other.getUnknownFields());
          return this;
        }

        public final boolean isInitialized() {
          return true;
        }

        public Builder mergeFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parsedMessage = null;
          try {
            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto) e.getUnfinishedMessage();
            throw e;
          } finally {
            if (parsedMessage != null) {
              mergeFrom(parsedMessage);
            }
          }
          return this;
        }
        private int bitField0_;

        // repeated fixed32 entries = 2 [packed = true];
        private java.util.List<java.lang.Integer> entries_ = java.util.Collections.emptyList();
        private void ensureEntriesIsMutable() {
          if (!((bitField0_ & 0x00000001) == 0x00000001)) {
            entries_ = new java.util.ArrayList<java.lang.Integer>(entries_);
            bitField0_ |= 0x00000001;
          }
        }
        /**
         * <code>repeated fixed32 entries = 2 [packed = true];</code>
         *
         * <pre>
         **
         * An ACL entry is represented by a 32-bit integer in big-endian
         * format. The bits can be divided into five segments:
         * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
         *
         * [0:2) -- reserved for future uses.
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- the scope of the entry (AclEntryScopeProto)
         * [27:29) -- the type of the entry (AclEntryTypeProto)
         * [29:32) -- the permission of the entry (FsActionProto)
         * </pre>
         */
        public java.util.List<java.lang.Integer>
            getEntriesList() {
          return java.util.Collections.unmodifiableList(entries_);
        }
        /**
         * <code>repeated fixed32 entries = 2 [packed = true];</code>
         *
         * <pre>
         **
         * An ACL entry is represented by a 32-bit integer in big-endian
         * format. The bits can be divided into five segments:
         * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
         *
         * [0:2) -- reserved for future uses.
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- the scope of the entry (AclEntryScopeProto)
         * [27:29) -- the type of the entry (AclEntryTypeProto)
         * [29:32) -- the permission of the entry (FsActionProto)
         * </pre>
         */
        public int getEntriesCount() {
          return entries_.size();
        }
        /**
         * <code>repeated fixed32 entries = 2 [packed = true];</code>
         *
         * <pre>
         **
         * An ACL entry is represented by a 32-bit integer in big-endian
         * format. The bits can be divided into five segments:
         * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
         *
         * [0:2) -- reserved for future uses.
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- the scope of the entry (AclEntryScopeProto)
         * [27:29) -- the type of the entry (AclEntryTypeProto)
         * [29:32) -- the permission of the entry (FsActionProto)
         * </pre>
         */
        public int getEntries(int index) {
          return entries_.get(index);
        }
        /**
         * <code>repeated fixed32 entries = 2 [packed = true];</code>
         *
         * <pre>
         **
         * An ACL entry is represented by a 32-bit integer in big-endian
         * format. The bits can be divided into five segments:
         * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
         *
         * [0:2) -- reserved for future uses.
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- the scope of the entry (AclEntryScopeProto)
         * [27:29) -- the type of the entry (AclEntryTypeProto)
         * [29:32) -- the permission of the entry (FsActionProto)
         * </pre>
         */
        public Builder setEntries(
            int index, int value) {
          ensureEntriesIsMutable();
          entries_.set(index, value);
          onChanged();
          return this;
        }
        /**
         * <code>repeated fixed32 entries = 2 [packed = true];</code>
         *
         * <pre>
         **
         * An ACL entry is represented by a 32-bit integer in big-endian
         * format. The bits can be divided into five segments:
         * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
         *
         * [0:2) -- reserved for future uses.
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- the scope of the entry (AclEntryScopeProto)
         * [27:29) -- the type of the entry (AclEntryTypeProto)
         * [29:32) -- the permission of the entry (FsActionProto)
         * </pre>
         */
        public Builder addEntries(int value) {
          ensureEntriesIsMutable();
          entries_.add(value);
          onChanged();
          return this;
        }
        /**
         * <code>repeated fixed32 entries = 2 [packed = true];</code>
         *
         * <pre>
         **
         * An ACL entry is represented by a 32-bit integer in big-endian
         * format. The bits can be divided into five segments:
         * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
         *
         * [0:2) -- reserved for future uses.
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- the scope of the entry (AclEntryScopeProto)
         * [27:29) -- the type of the entry (AclEntryTypeProto)
         * [29:32) -- the permission of the entry (FsActionProto)
         * </pre>
         */
        public Builder addAllEntries(
            java.lang.Iterable<? extends java.lang.Integer> values) {
          ensureEntriesIsMutable();
          super.addAll(values, entries_);
          onChanged();
          return this;
        }
        /**
         * <code>repeated fixed32 entries = 2 [packed = true];</code>
         *
         * <pre>
         **
         * An ACL entry is represented by a 32-bit integer in big-endian
         * format. The bits can be divided into five segments:
         * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
         *
         * [0:2) -- reserved for future uses.
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- the scope of the entry (AclEntryScopeProto)
         * [27:29) -- the type of the entry (AclEntryTypeProto)
         * [29:32) -- the permission of the entry (FsActionProto)
         * </pre>
         */
        public Builder clearEntries() {
          entries_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
          return this;
        }

        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.AclFeatureProto)
      }

      static {
        defaultInstance = new AclFeatureProto(true);
        defaultInstance.initFields();
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.AclFeatureProto)
    }

    public interface XAttrCompactProtoOrBuilder
        extends com.google.protobuf.MessageOrBuilder {

      // required fixed32 name = 1;
      /**
       * <code>required fixed32 name = 1;</code>
       *
       * <pre>
       **
       *
       * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- namespace extension. Originally there were only 4 namespaces
       * so only 2 bits were needed. At that time, this bit was reserved. When a
       * 5th namespace was created (raw) this bit became used as a 3rd namespace
       * bit.
       * [27:32) -- reserved for future uses.
       * </pre>
       */
      boolean hasName();
      /**
       * <code>required fixed32 name = 1;</code>
       *
       * <pre>
       **
       *
       * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- namespace extension. Originally there were only 4 namespaces
       * so only 2 bits were needed. At that time, this bit was reserved. When a
       * 5th namespace was created (raw) this bit became used as a 3rd namespace
       * bit.
       * [27:32) -- reserved for future uses.
       * </pre>
       */
      int getName();

      // optional bytes value = 2;
      /**
       * <code>optional bytes value = 2;</code>
       */
      boolean hasValue();
      /**
       * <code>optional bytes value = 2;</code>
       */
      com.google.protobuf.ByteString getValue();
    }
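    // The XAttr name field packs several values into one fixed32, per the
    // layout documented above. A minimal decoding sketch, assuming those
    // segment boundaries (bit 0 most significant); the helpers are
    // illustrative, not generated API, and how the extension bit is combined
    // with the low two namespace bits is an assumption here:
    //
    //   static int xattrNameId(int name) { return (name >>> 6) & 0xFFFFFF; }  // [2:26)
    //   static int xattrNamespace(int name) {
    //     int low2 = (name >>> 30) & 0x3;  // [0:2)
    //     int ext  = (name >>> 5) & 0x1;   // [26:27), the 3rd namespace bit
    //     return (ext << 2) | low2;
    //   }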
4111    /**
4112     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto}
4113     */
4114    public static final class XAttrCompactProto extends
4115        com.google.protobuf.GeneratedMessage
4116        implements XAttrCompactProtoOrBuilder {
4117      // Use XAttrCompactProto.newBuilder() to construct.
4118      private XAttrCompactProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
4119        super(builder);
4120        this.unknownFields = builder.getUnknownFields();
4121      }
4122      private XAttrCompactProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
4123
4124      private static final XAttrCompactProto defaultInstance;
4125      public static XAttrCompactProto getDefaultInstance() {
4126        return defaultInstance;
4127      }
4128
4129      public XAttrCompactProto getDefaultInstanceForType() {
4130        return defaultInstance;
4131      }
4132
4133      private final com.google.protobuf.UnknownFieldSet unknownFields;
4134      @java.lang.Override
4135      public final com.google.protobuf.UnknownFieldSet
4136          getUnknownFields() {
4137        return this.unknownFields;
4138      }
4139      private XAttrCompactProto(
4140          com.google.protobuf.CodedInputStream input,
4141          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4142          throws com.google.protobuf.InvalidProtocolBufferException {
4143        initFields();
4144        int mutable_bitField0_ = 0;
4145        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
4146            com.google.protobuf.UnknownFieldSet.newBuilder();
4147        try {
4148          boolean done = false;
4149          while (!done) {
4150            int tag = input.readTag();
4151            switch (tag) {
4152              case 0:
4153                done = true;
4154                break;
4155              default: {
4156                if (!parseUnknownField(input, unknownFields,
4157                                       extensionRegistry, tag)) {
4158                  done = true;
4159                }
4160                break;
4161              }
4162              case 13: {
4163                bitField0_ |= 0x00000001;
4164                name_ = input.readFixed32();
4165                break;
4166              }
4167              case 18: {
4168                bitField0_ |= 0x00000002;
4169                value_ = input.readBytes();
4170                break;
4171              }
4172            }
4173          }
4174        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
4175          throw e.setUnfinishedMessage(this);
4176        } catch (java.io.IOException e) {
4177          throw new com.google.protobuf.InvalidProtocolBufferException(
4178              e.getMessage()).setUnfinishedMessage(this);
4179        } finally {
4180          this.unknownFields = unknownFields.build();
4181          makeExtensionsImmutable();
4182        }
4183      }
4184      public static final com.google.protobuf.Descriptors.Descriptor
4185          getDescriptor() {
4186        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_descriptor;
4187      }
4188
4189      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
4190          internalGetFieldAccessorTable() {
4191        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_fieldAccessorTable
4192            .ensureFieldAccessorsInitialized(
4193                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder.class);
4194      }
4195
4196      public static com.google.protobuf.Parser<XAttrCompactProto> PARSER =
4197          new com.google.protobuf.AbstractParser<XAttrCompactProto>() {
4198        public XAttrCompactProto parsePartialFrom(
4199            com.google.protobuf.CodedInputStream input,
4200            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4201            throws com.google.protobuf.InvalidProtocolBufferException {
4202          return new XAttrCompactProto(input, extensionRegistry);
4203        }
4204      };
4205
4206      @java.lang.Override
4207      public com.google.protobuf.Parser<XAttrCompactProto> getParserForType() {
4208        return PARSER;
4209      }
4210
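      // Presence bits for this message's fields: bit 0 tracks whether 'name'
      // was set, bit 1 tracks 'value'.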
4211      private int bitField0_;
4212      // required fixed32 name = 1;
4213      public static final int NAME_FIELD_NUMBER = 1;
4214      private int name_;
4215      /**
4216       * <code>required fixed32 name = 1;</code>
4217       *
4218       * <pre>
4221       * [0:2) -- the namespace of the XAttr (XAttrNamespaceProto)
4222       * [2:26) -- the name of the entry, which is an ID that points to a
4223       * string in the StringTableSection.
4224       * [26:27) -- namespace extension. Originally there were only 4
4225       * namespaces, so only 2 bits were needed and this bit was reserved.
4226       * When a 5th namespace (raw) was added, this bit was repurposed as a
4227       * 3rd namespace bit.
4228       * [27:32) -- reserved for future use.
4229       * </pre>
4230       */
4231      public boolean hasName() {
4232        return ((bitField0_ & 0x00000001) == 0x00000001);
4233      }
4234      /**
4235       * <code>required fixed32 name = 1;</code>
4236       *
4237       * <pre>
4240       * [0:2) -- the namespace of the XAttr (XAttrNamespaceProto)
4241       * [2:26) -- the name of the entry, which is an ID that points to a
4242       * string in the StringTableSection.
4243       * [26:27) -- namespace extension. Originally there were only 4
4244       * namespaces, so only 2 bits were needed and this bit was reserved.
4245       * When a 5th namespace (raw) was added, this bit was repurposed as a
4246       * 3rd namespace bit.
4247       * [27:32) -- reserved for future use.
4248       * </pre>
4249       */
4250      public int getName() {
4251        return name_;
4252      }
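
      // A minimal hand-written sketch, not protoc output: packing and
      // unpacking the 'name' word per the layout documented above, assuming
      // the documented bit ranges count from the most significant bit (so
      // [0:2) is bits 31..30, [2:26) is bits 29..6, and [26:27) is bit 5).
      // These helper names are illustrative, not generated API.
      public static int packXAttrName(int namespaceOrdinal, int stringTableId) {
        int loNsBits = namespaceOrdinal & 0x3;          // 2 low namespace bits
        int extNsBit = (namespaceOrdinal >>> 2) & 0x1;  // 3rd namespace bit
        return (loNsBits << 30) | ((stringTableId & 0xFFFFFF) << 6)
            | (extNsBit << 5);                          // bits [27:32) stay 0
      }
      public static int unpackNamespaceOrdinal(int name) {
        return ((name >>> 30) & 0x3) | (((name >>> 5) & 0x1) << 2);
      }
      public static int unpackStringTableId(int name) {
        return (name >>> 6) & 0xFFFFFF;                 // 24-bit table ID
      }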
4253
4254      // optional bytes value = 2;
4255      public static final int VALUE_FIELD_NUMBER = 2;
4256      private com.google.protobuf.ByteString value_;
4257      /**
4258       * <code>optional bytes value = 2;</code>
4259       */
4260      public boolean hasValue() {
4261        return ((bitField0_ & 0x00000002) == 0x00000002);
4262      }
4263      /**
4264       * <code>optional bytes value = 2;</code>
4265       */
4266      public com.google.protobuf.ByteString getValue() {
4267        return value_;
4268      }
4269
4270      private void initFields() {
4271        name_ = 0;
4272        value_ = com.google.protobuf.ByteString.EMPTY;
4273      }
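      // Tri-state cache for isInitialized(): -1 not yet computed, 0 known
      // uninitialized, 1 known initialized.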
4274      private byte memoizedIsInitialized = -1;
4275      public final boolean isInitialized() {
4276        byte isInitialized = memoizedIsInitialized;
4277        if (isInitialized != -1) return isInitialized == 1;
4278
4279        if (!hasName()) {
4280          memoizedIsInitialized = 0;
4281          return false;
4282        }
4283        memoizedIsInitialized = 1;
4284        return true;
4285      }
4286
4287      public void writeTo(com.google.protobuf.CodedOutputStream output)
4288                          throws java.io.IOException {
4289        getSerializedSize();
4290        if (((bitField0_ & 0x00000001) == 0x00000001)) {
4291          output.writeFixed32(1, name_);
4292        }
4293        if (((bitField0_ & 0x00000002) == 0x00000002)) {
4294          output.writeBytes(2, value_);
4295        }
4296        getUnknownFields().writeTo(output);
4297      }
4298
4299      private int memoizedSerializedSize = -1;
4300      public int getSerializedSize() {
4301        int size = memoizedSerializedSize;
4302        if (size != -1) return size;
4303
4304        size = 0;
4305        if (((bitField0_ & 0x00000001) == 0x00000001)) {
4306          size += com.google.protobuf.CodedOutputStream
4307            .computeFixed32Size(1, name_);
4308        }
4309        if (((bitField0_ & 0x00000002) == 0x00000002)) {
4310          size += com.google.protobuf.CodedOutputStream
4311            .computeBytesSize(2, value_);
4312        }
4313        size += getUnknownFields().getSerializedSize();
4314        memoizedSerializedSize = size;
4315        return size;
4316      }
4317
4318      private static final long serialVersionUID = 0L;
4319      @java.lang.Override
4320      protected java.lang.Object writeReplace()
4321          throws java.io.ObjectStreamException {
4322        return super.writeReplace();
4323      }
4324
4325      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom(
4326          com.google.protobuf.ByteString data)
4327          throws com.google.protobuf.InvalidProtocolBufferException {
4328        return PARSER.parseFrom(data);
4329      }
4330      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom(
4331          com.google.protobuf.ByteString data,
4332          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4333          throws com.google.protobuf.InvalidProtocolBufferException {
4334        return PARSER.parseFrom(data, extensionRegistry);
4335      }
4336      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom(byte[] data)
4337          throws com.google.protobuf.InvalidProtocolBufferException {
4338        return PARSER.parseFrom(data);
4339      }
4340      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom(
4341          byte[] data,
4342          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4343          throws com.google.protobuf.InvalidProtocolBufferException {
4344        return PARSER.parseFrom(data, extensionRegistry);
4345      }
4346      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom(java.io.InputStream input)
4347          throws java.io.IOException {
4348        return PARSER.parseFrom(input);
4349      }
4350      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom(
4351          java.io.InputStream input,
4352          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4353          throws java.io.IOException {
4354        return PARSER.parseFrom(input, extensionRegistry);
4355      }
4356      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseDelimitedFrom(java.io.InputStream input)
4357          throws java.io.IOException {
4358        return PARSER.parseDelimitedFrom(input);
4359      }
4360      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseDelimitedFrom(
4361          java.io.InputStream input,
4362          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4363          throws java.io.IOException {
4364        return PARSER.parseDelimitedFrom(input, extensionRegistry);
4365      }
4366      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom(
4367          com.google.protobuf.CodedInputStream input)
4368          throws java.io.IOException {
4369        return PARSER.parseFrom(input);
4370      }
4371      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom(
4372          com.google.protobuf.CodedInputStream input,
4373          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4374          throws java.io.IOException {
4375        return PARSER.parseFrom(input, extensionRegistry);
4376      }
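
      // A hand-written usage sketch, not protoc output: round-tripping a
      // message through its wire form via the parseFrom overloads above.
      // parseFrom throws InvalidProtocolBufferException when the input is
      // malformed or the required 'name' field is missing.
      private static XAttrCompactProto roundTripForIllustration(
          XAttrCompactProto msg)
          throws com.google.protobuf.InvalidProtocolBufferException {
        byte[] wire = msg.toByteArray();  // serialization helper inherited
                                          // through GeneratedMessage
        return parseFrom(wire);           // delegates to PARSER.parseFrom
      }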
4377
4378      public static Builder newBuilder() { return Builder.create(); }
4379      public Builder newBuilderForType() { return newBuilder(); }
4380      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto prototype) {
4381        return newBuilder().mergeFrom(prototype);
4382      }
4383      public Builder toBuilder() { return newBuilder(this); }
4384
4385      @java.lang.Override
4386      protected Builder newBuilderForType(
4387          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
4388        Builder builder = new Builder(parent);
4389        return builder;
4390      }
4391      /**
4392       * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto}
4393       */
4394      public static final class Builder extends
4395          com.google.protobuf.GeneratedMessage.Builder<Builder>
4396         implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder {
4397        public static final com.google.protobuf.Descriptors.Descriptor
4398            getDescriptor() {
4399          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_descriptor;
4400        }
4401
4402        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
4403            internalGetFieldAccessorTable() {
4404          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_fieldAccessorTable
4405              .ensureFieldAccessorsInitialized(
4406                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder.class);
4407        }
4408
4409        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.newBuilder()
4410        private Builder() {
4411          maybeForceBuilderInitialization();
4412        }
4413
4414        private Builder(
4415            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
4416          super(parent);
4417          maybeForceBuilderInitialization();
4418        }
4419        private void maybeForceBuilderInitialization() {
4420          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
4421          }
4422        }
4423        private static Builder create() {
4424          return new Builder();
4425        }
4426
4427        public Builder clear() {
4428          super.clear();
4429          name_ = 0;
4430          bitField0_ = (bitField0_ & ~0x00000001);
4431          value_ = com.google.protobuf.ByteString.EMPTY;
4432          bitField0_ = (bitField0_ & ~0x00000002);
4433          return this;
4434        }
4435
4436        public Builder clone() {
4437          return create().mergeFrom(buildPartial());
4438        }
4439
4440        public com.google.protobuf.Descriptors.Descriptor
4441            getDescriptorForType() {
4442          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_descriptor;
4443        }
4444
4445        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto getDefaultInstanceForType() {
4446          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.getDefaultInstance();
4447        }
4448
4449        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto build() {
4450          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto result = buildPartial();
4451          if (!result.isInitialized()) {
4452            throw newUninitializedMessageException(result);
4453          }
4454          return result;
4455        }
4456
4457        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto buildPartial() {
4458          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto(this);
4459          int from_bitField0_ = bitField0_;
4460          int to_bitField0_ = 0;
4461          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
4462            to_bitField0_ |= 0x00000001;
4463          }
4464          result.name_ = name_;
4465          if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
4466            to_bitField0_ |= 0x00000002;
4467          }
4468          result.value_ = value_;
4469          result.bitField0_ = to_bitField0_;
4470          onBuilt();
4471          return result;
4472        }
4473
4474        public Builder mergeFrom(com.google.protobuf.Message other) {
4475          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto) {
4476            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto)other);
4477          } else {
4478            super.mergeFrom(other);
4479            return this;
4480          }
4481        }
4482
4483        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto other) {
4484          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.getDefaultInstance()) return this;
4485          if (other.hasName()) {
4486            setName(other.getName());
4487          }
4488          if (other.hasValue()) {
4489            setValue(other.getValue());
4490          }
4491          this.mergeUnknownFields(other.getUnknownFields());
4492          return this;
4493        }
4494
4495        public final boolean isInitialized() {
4496          if (!hasName()) {
4498            return false;
4499          }
4500          return true;
4501        }
4502
4503        public Builder mergeFrom(
4504            com.google.protobuf.CodedInputStream input,
4505            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4506            throws java.io.IOException {
4507          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parsedMessage = null;
4508          try {
4509            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
4510          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
4511            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto) e.getUnfinishedMessage();
4512            throw e;
4513          } finally {
4514            if (parsedMessage != null) {
4515              mergeFrom(parsedMessage);
4516            }
4517          }
4518          return this;
4519        }
4520        private int bitField0_;
4521
4522        // required fixed32 name = 1;
4523        private int name_ ;
4524        /**
4525         * <code>required fixed32 name = 1;</code>
4526         *
4527         * <pre>
4530         * [0:2) -- the namespace of the XAttr (XAttrNamespaceProto)
4531         * [2:26) -- the name of the entry, which is an ID that points to a
4532         * string in the StringTableSection.
4533         * [26:27) -- namespace extension. Originally there were only 4
4534         * namespaces, so only 2 bits were needed and this bit was reserved.
4535         * When a 5th namespace (raw) was added, this bit was repurposed as a
4536         * 3rd namespace bit.
4537         * [27:32) -- reserved for future use.
4538         * </pre>
4539         */
4540        public boolean hasName() {
4541          return ((bitField0_ & 0x00000001) == 0x00000001);
4542        }
4543        /**
4544         * <code>required fixed32 name = 1;</code>
4545         *
4546         * <pre>
4549         * [0:2) -- the namespace of the XAttr (XAttrNamespaceProto)
4550         * [2:26) -- the name of the entry, which is an ID that points to a
4551         * string in the StringTableSection.
4552         * [26:27) -- namespace extension. Originally there were only 4
4553         * namespaces, so only 2 bits were needed and this bit was reserved.
4554         * When a 5th namespace (raw) was added, this bit was repurposed as a
4555         * 3rd namespace bit.
4556         * [27:32) -- reserved for future use.
4557         * </pre>
4558         */
4559        public int getName() {
4560          return name_;
4561        }
4562        /**
4563         * <code>required fixed32 name = 1;</code>
4564         *
4565         * <pre>
4568         * [0:2) -- the namespace of the XAttr (XAttrNamespaceProto)
4569         * [2:26) -- the name of the entry, which is an ID that points to a
4570         * string in the StringTableSection.
4571         * [26:27) -- namespace extension. Originally there were only 4
4572         * namespaces, so only 2 bits were needed and this bit was reserved.
4573         * When a 5th namespace (raw) was added, this bit was repurposed as a
4574         * 3rd namespace bit.
4575         * [27:32) -- reserved for future use.
4576         * </pre>
4577         */
4578        public Builder setName(int value) {
4579          bitField0_ |= 0x00000001;
4580          name_ = value;
4581          onChanged();
4582          return this;
4583        }
4584        /**
4585         * <code>required fixed32 name = 1;</code>
4586         *
4587         * <pre>
4590         * [0:2) -- the namespace of the XAttr (XAttrNamespaceProto)
4591         * [2:26) -- the name of the entry, which is an ID that points to a
4592         * string in the StringTableSection.
4593         * [26:27) -- namespace extension. Originally there were only 4
4594         * namespaces, so only 2 bits were needed and this bit was reserved.
4595         * When a 5th namespace (raw) was added, this bit was repurposed as a
4596         * 3rd namespace bit.
4597         * [27:32) -- reserved for future use.
4598         * </pre>
4599         */
4600        public Builder clearName() {
4601          bitField0_ = (bitField0_ & ~0x00000001);
4602          name_ = 0;
4603          onChanged();
4604          return this;
4605        }
4606
4607        // optional bytes value = 2;
4608        private com.google.protobuf.ByteString value_ = com.google.protobuf.ByteString.EMPTY;
4609        /**
4610         * <code>optional bytes value = 2;</code>
4611         */
4612        public boolean hasValue() {
4613          return ((bitField0_ & 0x00000002) == 0x00000002);
4614        }
4615        /**
4616         * <code>optional bytes value = 2;</code>
4617         */
4618        public com.google.protobuf.ByteString getValue() {
4619          return value_;
4620        }
4621        /**
4622         * <code>optional bytes value = 2;</code>
4623         */
4624        public Builder setValue(com.google.protobuf.ByteString value) {
4625          if (value == null) {
4626            throw new NullPointerException();
4627          }
4628          bitField0_ |= 0x00000002;
4629          value_ = value;
4630          onChanged();
4631          return this;
4632        }
4633        /**
4634         * <code>optional bytes value = 2;</code>
4635         */
4636        public Builder clearValue() {
4637          bitField0_ = (bitField0_ & ~0x00000002);
4638          value_ = getDefaultInstance().getValue();
4639          onChanged();
4640          return this;
4641        }
4642
4643        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto)
4644      }
4645
4646      static {
4647        defaultInstance = new XAttrCompactProto(true);
4648        defaultInstance.initFields();
4649      }
4650
4651      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto)
4652    }
4653
4654    public interface XAttrFeatureProtoOrBuilder
4655        extends com.google.protobuf.MessageOrBuilder {
4656
4657      // repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;
4658      /**
4659       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
4660       */
4661      java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto> 
4662          getXAttrsList();
4663      /**
4664       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
4665       */
4666      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto getXAttrs(int index);
4667      /**
4668       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
4669       */
4670      int getXAttrsCount();
4671      /**
4672       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
4673       */
4674      java.util.List<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder> 
4675          getXAttrsOrBuilderList();
4676      /**
4677       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
4678       */
4679      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder getXAttrsOrBuilder(
4680          int index);
4681    }
4682    /**
4683     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto}
4684     */
4685    public static final class XAttrFeatureProto extends
4686        com.google.protobuf.GeneratedMessage
4687        implements XAttrFeatureProtoOrBuilder {
4688      // Use XAttrFeatureProto.newBuilder() to construct.
4689      private XAttrFeatureProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
4690        super(builder);
4691        this.unknownFields = builder.getUnknownFields();
4692      }
4693      private XAttrFeatureProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
4694
4695      private static final XAttrFeatureProto defaultInstance;
4696      public static XAttrFeatureProto getDefaultInstance() {
4697        return defaultInstance;
4698      }
4699
4700      public XAttrFeatureProto getDefaultInstanceForType() {
4701        return defaultInstance;
4702      }
4703
4704      private final com.google.protobuf.UnknownFieldSet unknownFields;
4705      @java.lang.Override
4706      public final com.google.protobuf.UnknownFieldSet
4707          getUnknownFields() {
4708        return this.unknownFields;
4709      }
4710      private XAttrFeatureProto(
4711          com.google.protobuf.CodedInputStream input,
4712          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4713          throws com.google.protobuf.InvalidProtocolBufferException {
4714        initFields();
4715        int mutable_bitField0_ = 0;
4716        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
4717            com.google.protobuf.UnknownFieldSet.newBuilder();
4718        try {
4719          boolean done = false;
4720          while (!done) {
4721            int tag = input.readTag();
4722            switch (tag) {
4723              case 0:
4724                done = true;
4725                break;
4726              default: {
4727                if (!parseUnknownField(input, unknownFields,
4728                                       extensionRegistry, tag)) {
4729                  done = true;
4730                }
4731                break;
4732              }
4733              case 10: {
4734                if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
4735                  xAttrs_ = new java.util.ArrayList<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto>();
4736                  mutable_bitField0_ |= 0x00000001;
4737                }
4738                xAttrs_.add(input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.PARSER, extensionRegistry));
4739                break;
4740              }
4741            }
4742          }
4743        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
4744          throw e.setUnfinishedMessage(this);
4745        } catch (java.io.IOException e) {
4746          throw new com.google.protobuf.InvalidProtocolBufferException(
4747              e.getMessage()).setUnfinishedMessage(this);
4748        } finally {
4749          if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
4750            xAttrs_ = java.util.Collections.unmodifiableList(xAttrs_);
4751          }
4752          this.unknownFields = unknownFields.build();
4753          makeExtensionsImmutable();
4754        }
4755      }
4756      public static final com.google.protobuf.Descriptors.Descriptor
4757          getDescriptor() {
4758        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_descriptor;
4759      }
4760
4761      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
4762          internalGetFieldAccessorTable() {
4763        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_fieldAccessorTable
4764            .ensureFieldAccessorsInitialized(
4765                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder.class);
4766      }
4767
4768      public static com.google.protobuf.Parser<XAttrFeatureProto> PARSER =
4769          new com.google.protobuf.AbstractParser<XAttrFeatureProto>() {
4770        public XAttrFeatureProto parsePartialFrom(
4771            com.google.protobuf.CodedInputStream input,
4772            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4773            throws com.google.protobuf.InvalidProtocolBufferException {
4774          return new XAttrFeatureProto(input, extensionRegistry);
4775        }
4776      };
4777
4778      @java.lang.Override
4779      public com.google.protobuf.Parser<XAttrFeatureProto> getParserForType() {
4780        return PARSER;
4781      }
4782
4783      // repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;
4784      public static final int XATTRS_FIELD_NUMBER = 1;
4785      private java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto> xAttrs_;
4786      /**
4787       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
4788       */
4789      public java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto> getXAttrsList() {
4790        return xAttrs_;
4791      }
4792      /**
4793       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
4794       */
4795      public java.util.List<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder> 
4796          getXAttrsOrBuilderList() {
4797        return xAttrs_;
4798      }
4799      /**
4800       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
4801       */
4802      public int getXAttrsCount() {
4803        return xAttrs_.size();
4804      }
4805      /**
4806       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
4807       */
4808      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto getXAttrs(int index) {
4809        return xAttrs_.get(index);
4810      }
4811      /**
4812       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
4813       */
4814      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder getXAttrsOrBuilder(
4815          int index) {
4816        return xAttrs_.get(index);
4817      }
4818
4819      private void initFields() {
4820        xAttrs_ = java.util.Collections.emptyList();
4821      }
4822      private byte memoizedIsInitialized = -1;
4823      public final boolean isInitialized() {
4824        byte isInitialized = memoizedIsInitialized;
4825        if (isInitialized != -1) return isInitialized == 1;
4826
4827        for (int i = 0; i < getXAttrsCount(); i++) {
4828          if (!getXAttrs(i).isInitialized()) {
4829            memoizedIsInitialized = 0;
4830            return false;
4831          }
4832        }
4833        memoizedIsInitialized = 1;
4834        return true;
4835      }
4836
4837      public void writeTo(com.google.protobuf.CodedOutputStream output)
4838                          throws java.io.IOException {
4839        getSerializedSize();
4840        for (int i = 0; i < xAttrs_.size(); i++) {
4841          output.writeMessage(1, xAttrs_.get(i));
4842        }
4843        getUnknownFields().writeTo(output);
4844      }
4845
4846      private int memoizedSerializedSize = -1;
4847      public int getSerializedSize() {
4848        int size = memoizedSerializedSize;
4849        if (size != -1) return size;
4850
4851        size = 0;
4852        for (int i = 0; i < xAttrs_.size(); i++) {
4853          size += com.google.protobuf.CodedOutputStream
4854            .computeMessageSize(1, xAttrs_.get(i));
4855        }
4856        size += getUnknownFields().getSerializedSize();
4857        memoizedSerializedSize = size;
4858        return size;
4859      }
4860
4861      private static final long serialVersionUID = 0L;
4862      @java.lang.Override
4863      protected java.lang.Object writeReplace()
4864          throws java.io.ObjectStreamException {
4865        return super.writeReplace();
4866      }
4867
4868      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom(
4869          com.google.protobuf.ByteString data)
4870          throws com.google.protobuf.InvalidProtocolBufferException {
4871        return PARSER.parseFrom(data);
4872      }
4873      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom(
4874          com.google.protobuf.ByteString data,
4875          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4876          throws com.google.protobuf.InvalidProtocolBufferException {
4877        return PARSER.parseFrom(data, extensionRegistry);
4878      }
4879      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom(byte[] data)
4880          throws com.google.protobuf.InvalidProtocolBufferException {
4881        return PARSER.parseFrom(data);
4882      }
4883      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom(
4884          byte[] data,
4885          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4886          throws com.google.protobuf.InvalidProtocolBufferException {
4887        return PARSER.parseFrom(data, extensionRegistry);
4888      }
4889      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom(java.io.InputStream input)
4890          throws java.io.IOException {
4891        return PARSER.parseFrom(input);
4892      }
4893      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom(
4894          java.io.InputStream input,
4895          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4896          throws java.io.IOException {
4897        return PARSER.parseFrom(input, extensionRegistry);
4898      }
4899      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseDelimitedFrom(java.io.InputStream input)
4900          throws java.io.IOException {
4901        return PARSER.parseDelimitedFrom(input);
4902      }
4903      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseDelimitedFrom(
4904          java.io.InputStream input,
4905          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4906          throws java.io.IOException {
4907        return PARSER.parseDelimitedFrom(input, extensionRegistry);
4908      }
4909      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom(
4910          com.google.protobuf.CodedInputStream input)
4911          throws java.io.IOException {
4912        return PARSER.parseFrom(input);
4913      }
4914      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom(
4915          com.google.protobuf.CodedInputStream input,
4916          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4917          throws java.io.IOException {
4918        return PARSER.parseFrom(input, extensionRegistry);
4919      }
4920
4921      public static Builder newBuilder() { return Builder.create(); }
4922      public Builder newBuilderForType() { return newBuilder(); }
4923      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto prototype) {
4924        return newBuilder().mergeFrom(prototype);
4925      }
4926      public Builder toBuilder() { return newBuilder(this); }
4927
4928      @java.lang.Override
4929      protected Builder newBuilderForType(
4930          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
4931        Builder builder = new Builder(parent);
4932        return builder;
4933      }
4934      /**
4935       * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto}
4936       */
4937      public static final class Builder extends
4938          com.google.protobuf.GeneratedMessage.Builder<Builder>
4939         implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder {
4940        public static final com.google.protobuf.Descriptors.Descriptor
4941            getDescriptor() {
4942          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_descriptor;
4943        }
4944
4945        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
4946            internalGetFieldAccessorTable() {
4947          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_fieldAccessorTable
4948              .ensureFieldAccessorsInitialized(
4949                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder.class);
4950        }
4951
4952        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.newBuilder()
4953        private Builder() {
4954          maybeForceBuilderInitialization();
4955        }
4956
4957        private Builder(
4958            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
4959          super(parent);
4960          maybeForceBuilderInitialization();
4961        }
4962        private void maybeForceBuilderInitialization() {
4963          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
4964            getXAttrsFieldBuilder();
4965          }
4966        }
4967        private static Builder create() {
4968          return new Builder();
4969        }
4970
4971        public Builder clear() {
4972          super.clear();
4973          if (xAttrsBuilder_ == null) {
4974            xAttrs_ = java.util.Collections.emptyList();
4975            bitField0_ = (bitField0_ & ~0x00000001);
4976          } else {
4977            xAttrsBuilder_.clear();
4978          }
4979          return this;
4980        }
4981
4982        public Builder clone() {
4983          return create().mergeFrom(buildPartial());
4984        }
4985
4986        public com.google.protobuf.Descriptors.Descriptor
4987            getDescriptorForType() {
4988          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_descriptor;
4989        }
4990
4991        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getDefaultInstanceForType() {
4992          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance();
4993        }
4994
4995        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto build() {
4996          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto result = buildPartial();
4997          if (!result.isInitialized()) {
4998            throw newUninitializedMessageException(result);
4999          }
5000          return result;
5001        }
5002
5003        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto buildPartial() {
5004          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto(this);
5005          int from_bitField0_ = bitField0_;
5006          if (xAttrsBuilder_ == null) {
5007            if (((bitField0_ & 0x00000001) == 0x00000001)) {
5008              xAttrs_ = java.util.Collections.unmodifiableList(xAttrs_);
5009              bitField0_ = (bitField0_ & ~0x00000001);
5010            }
5011            result.xAttrs_ = xAttrs_;
5012          } else {
5013            result.xAttrs_ = xAttrsBuilder_.build();
5014          }
5015          onBuilt();
5016          return result;
5017        }
5018
5019        public Builder mergeFrom(com.google.protobuf.Message other) {
5020          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto) {
5021            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto)other);
5022          } else {
5023            super.mergeFrom(other);
5024            return this;
5025          }
5026        }
5027
5028        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto other) {
5029          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance()) return this;
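          // Two storage modes: a plain list (xAttrsBuilder_ == null) that can
          // adopt the other message's immutable list wholesale when this one
          // is empty, or a RepeatedFieldBuilder that absorbs the entries as
          // messages.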
5030          if (xAttrsBuilder_ == null) {
5031            if (!other.xAttrs_.isEmpty()) {
5032              if (xAttrs_.isEmpty()) {
5033                xAttrs_ = other.xAttrs_;
5034                bitField0_ = (bitField0_ & ~0x00000001);
5035              } else {
5036                ensureXAttrsIsMutable();
5037                xAttrs_.addAll(other.xAttrs_);
5038              }
5039              onChanged();
5040            }
5041          } else {
5042            if (!other.xAttrs_.isEmpty()) {
5043              if (xAttrsBuilder_.isEmpty()) {
5044                xAttrsBuilder_.dispose();
5045                xAttrsBuilder_ = null;
5046                xAttrs_ = other.xAttrs_;
5047                bitField0_ = (bitField0_ & ~0x00000001);
5048                xAttrsBuilder_ = 
5049                  com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
5050                     getXAttrsFieldBuilder() : null;
5051              } else {
5052                xAttrsBuilder_.addAllMessages(other.xAttrs_);
5053              }
5054            }
5055          }
5056          this.mergeUnknownFields(other.getUnknownFields());
5057          return this;
5058        }
5059
5060        public final boolean isInitialized() {
5061          for (int i = 0; i < getXAttrsCount(); i++) {
5062            if (!getXAttrs(i).isInitialized()) {
5064              return false;
5065            }
5066          }
5067          return true;
5068        }
5069
5070        public Builder mergeFrom(
5071            com.google.protobuf.CodedInputStream input,
5072            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5073            throws java.io.IOException {
5074          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parsedMessage = null;
5075          try {
5076            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
5077          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
5078            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto) e.getUnfinishedMessage();
5079            throw e;
5080          } finally {
5081            if (parsedMessage != null) {
5082              mergeFrom(parsedMessage);
5083            }
5084          }
5085          return this;
5086        }
5087        private int bitField0_;
5088
5089        // repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;
5090        private java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto> xAttrs_ =
5091          java.util.Collections.emptyList();
5092        private void ensureXAttrsIsMutable() {
5093          if (!((bitField0_ & 0x00000001) == 0x00000001)) {
5094            xAttrs_ = new java.util.ArrayList<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto>(xAttrs_);
5095            bitField0_ |= 0x00000001;
5096          }
5097        }
5098
5099        private com.google.protobuf.RepeatedFieldBuilder<
5100            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder> xAttrsBuilder_;
5101
5102        /**
5103         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
5104         */
5105        public java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto> getXAttrsList() {
5106          if (xAttrsBuilder_ == null) {
5107            return java.util.Collections.unmodifiableList(xAttrs_);
5108          } else {
5109            return xAttrsBuilder_.getMessageList();
5110          }
5111        }
5112        /**
5113         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
5114         */
5115        public int getXAttrsCount() {
5116          if (xAttrsBuilder_ == null) {
5117            return xAttrs_.size();
5118          } else {
5119            return xAttrsBuilder_.getCount();
5120          }
5121        }
5122        /**
5123         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
5124         */
5125        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto getXAttrs(int index) {
5126          if (xAttrsBuilder_ == null) {
5127            return xAttrs_.get(index);
5128          } else {
5129            return xAttrsBuilder_.getMessage(index);
5130          }
5131        }
5132        /**
5133         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
5134         */
5135        public Builder setXAttrs(
5136            int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto value) {
5137          if (xAttrsBuilder_ == null) {
5138            if (value == null) {
5139              throw new NullPointerException();
5140            }
5141            ensureXAttrsIsMutable();
5142            xAttrs_.set(index, value);
5143            onChanged();
5144          } else {
5145            xAttrsBuilder_.setMessage(index, value);
5146          }
5147          return this;
5148        }
5149        /**
5150         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
5151         */
5152        public Builder setXAttrs(
5153            int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder builderForValue) {
5154          if (xAttrsBuilder_ == null) {
5155            ensureXAttrsIsMutable();
5156            xAttrs_.set(index, builderForValue.build());
5157            onChanged();
5158          } else {
5159            xAttrsBuilder_.setMessage(index, builderForValue.build());
5160          }
5161          return this;
5162        }
5163        /**
5164         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
5165         */
5166        public Builder addXAttrs(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto value) {
5167          if (xAttrsBuilder_ == null) {
5168            if (value == null) {
5169              throw new NullPointerException();
5170            }
5171            ensureXAttrsIsMutable();
5172            xAttrs_.add(value);
5173            onChanged();
5174          } else {
5175            xAttrsBuilder_.addMessage(value);
5176          }
5177          return this;
5178        }
5179        /**
5180         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
5181         */
5182        public Builder addXAttrs(
5183            int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto value) {
5184          if (xAttrsBuilder_ == null) {
5185            if (value == null) {
5186              throw new NullPointerException();
5187            }
5188            ensureXAttrsIsMutable();
5189            xAttrs_.add(index, value);
5190            onChanged();
5191          } else {
5192            xAttrsBuilder_.addMessage(index, value);
5193          }
5194          return this;
5195        }
5196        /**
5197         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
5198         */
5199        public Builder addXAttrs(
5200            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder builderForValue) {
5201          if (xAttrsBuilder_ == null) {
5202            ensureXAttrsIsMutable();
5203            xAttrs_.add(builderForValue.build());
5204            onChanged();
5205          } else {
5206            xAttrsBuilder_.addMessage(builderForValue.build());
5207          }
5208          return this;
5209        }
5210        /**
5211         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
5212         */
5213        public Builder addXAttrs(
5214            int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder builderForValue) {
5215          if (xAttrsBuilder_ == null) {
5216            ensureXAttrsIsMutable();
5217            xAttrs_.add(index, builderForValue.build());
5218            onChanged();
5219          } else {
5220            xAttrsBuilder_.addMessage(index, builderForValue.build());
5221          }
5222          return this;
5223        }
5224        /**
5225         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
5226         */
5227        public Builder addAllXAttrs(
5228            java.lang.Iterable<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto> values) {
5229          if (xAttrsBuilder_ == null) {
5230            ensureXAttrsIsMutable();
5231            super.addAll(values, xAttrs_);
5232            onChanged();
5233          } else {
5234            xAttrsBuilder_.addAllMessages(values);
5235          }
5236          return this;
5237        }
5238        /**
5239         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
5240         */
5241        public Builder clearXAttrs() {
5242          if (xAttrsBuilder_ == null) {
5243            xAttrs_ = java.util.Collections.emptyList();
5244            bitField0_ = (bitField0_ & ~0x00000001);
5245            onChanged();
5246          } else {
5247            xAttrsBuilder_.clear();
5248          }
5249          return this;
5250        }
5251        /**
5252         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
5253         */
5254        public Builder removeXAttrs(int index) {
5255          if (xAttrsBuilder_ == null) {
5256            ensureXAttrsIsMutable();
5257            xAttrs_.remove(index);
5258            onChanged();
5259          } else {
5260            xAttrsBuilder_.remove(index);
5261          }
5262          return this;
5263        }
5264        /**
5265         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
5266         */
5267        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder getXAttrsBuilder(
5268            int index) {
5269          return getXAttrsFieldBuilder().getBuilder(index);
5270        }
5271        /**
5272         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
5273         */
5274        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder getXAttrsOrBuilder(
5275            int index) {
5276          if (xAttrsBuilder_ == null) {
5277            return xAttrs_.get(index);
          } else {
5278            return xAttrsBuilder_.getMessageOrBuilder(index);
5279          }
5280        }
5281        /**
5282         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
5283         */
5284        public java.util.List<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder> 
5285             getXAttrsOrBuilderList() {
5286          if (xAttrsBuilder_ != null) {
5287            return xAttrsBuilder_.getMessageOrBuilderList();
5288          } else {
5289            return java.util.Collections.unmodifiableList(xAttrs_);
5290          }
5291        }
5292        /**
5293         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
5294         */
5295        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder addXAttrsBuilder() {
5296          return getXAttrsFieldBuilder().addBuilder(
5297              org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.getDefaultInstance());
5298        }
5299        /**
5300         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
5301         */
5302        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder addXAttrsBuilder(
5303            int index) {
5304          return getXAttrsFieldBuilder().addBuilder(
5305              index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.getDefaultInstance());
5306        }
5307        /**
5308         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
5309         */
5310        public java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder> 
5311             getXAttrsBuilderList() {
5312          return getXAttrsFieldBuilder().getBuilderList();
5313        }
5314        private com.google.protobuf.RepeatedFieldBuilder<
5315            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder> 
5316            getXAttrsFieldBuilder() {
5317          if (xAttrsBuilder_ == null) {
5318            xAttrsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
5319                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder>(
5320                    xAttrs_,
5321                    ((bitField0_ & 0x00000001) == 0x00000001),
5322                    getParentForChildren(),
5323                    isClean());
5324            xAttrs_ = null;
5325          }
5326          return xAttrsBuilder_;
5327        }
5328
5329        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto)
5330      }
5331
5332      static {
5333        defaultInstance = new XAttrFeatureProto(true);
5334        defaultInstance.initFields();
5335      }
5336
5337      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto)
5338    }
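
    // A hand-written sketch, not protoc output: assembling an
    // XAttrFeatureProto from compact entries with the generated builders
    // above. build() throws if the required XAttrCompactProto.name field
    // was never set; the parameters here are illustrative.
    private static XAttrFeatureProto xAttrFeatureSketch(
        int packedName, com.google.protobuf.ByteString value) {
      XAttrCompactProto entry = XAttrCompactProto.newBuilder()
          .setName(packedName)  // required fixed32, packed as documented above
          .setValue(value)      // optional bytes payload
          .build();
      return XAttrFeatureProto.newBuilder()
          .addXAttrs(entry)     // repeated field: one entry per XAttr
          .build();
    }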
5339
5340    public interface INodeFileOrBuilder
5341        extends com.google.protobuf.MessageOrBuilder {
5342
5343      // optional uint32 replication = 1;
5344      /**
5345       * <code>optional uint32 replication = 1;</code>
5346       */
5347      boolean hasReplication();
5348      /**
5349       * <code>optional uint32 replication = 1;</code>
5350       */
5351      int getReplication();
5352
5353      // optional uint64 modificationTime = 2;
5354      /**
5355       * <code>optional uint64 modificationTime = 2;</code>
5356       */
5357      boolean hasModificationTime();
5358      /**
5359       * <code>optional uint64 modificationTime = 2;</code>
5360       */
5361      long getModificationTime();
5362
5363      // optional uint64 accessTime = 3;
5364      /**
5365       * <code>optional uint64 accessTime = 3;</code>
5366       */
5367      boolean hasAccessTime();
5368      /**
5369       * <code>optional uint64 accessTime = 3;</code>
5370       */
5371      long getAccessTime();
5372
5373      // optional uint64 preferredBlockSize = 4;
5374      /**
5375       * <code>optional uint64 preferredBlockSize = 4;</code>
5376       */
5377      boolean hasPreferredBlockSize();
5378      /**
5379       * <code>optional uint64 preferredBlockSize = 4;</code>
5380       */
5381      long getPreferredBlockSize();
5382
5383      // optional fixed64 permission = 5;
5384      /**
5385       * <code>optional fixed64 permission = 5;</code>
5386       */
5387      boolean hasPermission();
5388      /**
5389       * <code>optional fixed64 permission = 5;</code>
5390       */
5391      long getPermission();
5392
5393      // repeated .hadoop.hdfs.BlockProto blocks = 6;
5394      /**
5395       * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
5396       */
5397      java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> 
5398          getBlocksList();
5399      /**
5400       * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
5401       */
5402      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index);
5403      /**
5404       * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
5405       */
5406      int getBlocksCount();
5407      /**
5408       * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
5409       */
5410      java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
5411          getBlocksOrBuilderList();
5412      /**
5413       * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
5414       */
5415      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
5416          int index);
5417
5418      // optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;
5419      /**
5420       * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
5421       */
5422      boolean hasFileUC();
5423      /**
5424       * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
5425       */
5426      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature getFileUC();
5427      /**
5428       * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
5429       */
5430      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeatureOrBuilder getFileUCOrBuilder();
5431
5432      // optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;
5433      /**
5434       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
5435       */
5436      boolean hasAcl();
5437      /**
5438       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
5439       */
5440      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getAcl();
5441      /**
5442       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
5443       */
5444      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder getAclOrBuilder();
5445
5446      // optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;
5447      /**
5448       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
5449       */
5450      boolean hasXAttrs();
5451      /**
5452       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
5453       */
5454      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getXAttrs();
5455      /**
5456       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
5457       */
5458      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder getXAttrsOrBuilder();
5459
5460      // optional uint32 storagePolicyID = 10;
5461      /**
5462       * <code>optional uint32 storagePolicyID = 10;</code>
5463       */
5464      boolean hasStoragePolicyID();
5465      /**
5466       * <code>optional uint32 storagePolicyID = 10;</code>
5467       */
5468      int getStoragePolicyID();
5469    }
5470    /**
5471     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INodeFile}
5472     */
5473    public static final class INodeFile extends
5474        com.google.protobuf.GeneratedMessage
5475        implements INodeFileOrBuilder {
5476      // Use INodeFile.newBuilder() to construct.
5477      private INodeFile(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
5478        super(builder);
5479        this.unknownFields = builder.getUnknownFields();
5480      }
5481      private INodeFile(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
5482
5483      private static final INodeFile defaultInstance;
5484      public static INodeFile getDefaultInstance() {
5485        return defaultInstance;
5486      }
5487
5488      public INodeFile getDefaultInstanceForType() {
5489        return defaultInstance;
5490      }
5491
5492      private final com.google.protobuf.UnknownFieldSet unknownFields;
5493      @java.lang.Override
5494      public final com.google.protobuf.UnknownFieldSet
5495          getUnknownFields() {
5496        return this.unknownFields;
5497      }
5498      private INodeFile(
5499          com.google.protobuf.CodedInputStream input,
5500          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5501          throws com.google.protobuf.InvalidProtocolBufferException {
5502        initFields();
5503        int mutable_bitField0_ = 0;
5504        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
5505            com.google.protobuf.UnknownFieldSet.newBuilder();
5506        try {
5507          boolean done = false;
5508          while (!done) {
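            // Each tag read below encodes (field_number << 3) | wire_type, so
            // case 8 is field 1 as a varint, case 41 is field 5 as a fixed64,
            // case 50 is field 6 as a length-delimited message, and case 80 is
            // field 10 as a varint; tag 0 marks end of input.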
5509            int tag = input.readTag();
5510            switch (tag) {
5511              case 0:
5512                done = true;
5513                break;
5514              default: {
5515                if (!parseUnknownField(input, unknownFields,
5516                                       extensionRegistry, tag)) {
5517                  done = true;
5518                }
5519                break;
5520              }
5521              case 8: {
5522                bitField0_ |= 0x00000001;
5523                replication_ = input.readUInt32();
5524                break;
5525              }
5526              case 16: {
5527                bitField0_ |= 0x00000002;
5528                modificationTime_ = input.readUInt64();
5529                break;
5530              }
5531              case 24: {
5532                bitField0_ |= 0x00000004;
5533                accessTime_ = input.readUInt64();
5534                break;
5535              }
5536              case 32: {
5537                bitField0_ |= 0x00000008;
5538                preferredBlockSize_ = input.readUInt64();
5539                break;
5540              }
5541              case 41: {
5542                bitField0_ |= 0x00000010;
5543                permission_ = input.readFixed64();
5544                break;
5545              }
5546              case 50: {
5547                if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
5548                  blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto>();
5549                  mutable_bitField0_ |= 0x00000020;
5550                }
5551                blocks_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.PARSER, extensionRegistry));
5552                break;
5553              }
5554              case 58: {
5555                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder subBuilder = null;
5556                if (((bitField0_ & 0x00000020) == 0x00000020)) {
5557                  subBuilder = fileUC_.toBuilder();
5558                }
5559                fileUC_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.PARSER, extensionRegistry);
5560                if (subBuilder != null) {
5561                  subBuilder.mergeFrom(fileUC_);
5562                  fileUC_ = subBuilder.buildPartial();
5563                }
5564                bitField0_ |= 0x00000020;
5565                break;
5566              }
5567              case 66: {
5568                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder subBuilder = null;
5569                if (((bitField0_ & 0x00000040) == 0x00000040)) {
5570                  subBuilder = acl_.toBuilder();
5571                }
5572                acl_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.PARSER, extensionRegistry);
5573                if (subBuilder != null) {
5574                  subBuilder.mergeFrom(acl_);
5575                  acl_ = subBuilder.buildPartial();
5576                }
5577                bitField0_ |= 0x00000040;
5578                break;
5579              }
5580              case 74: {
5581                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder subBuilder = null;
5582                if (((bitField0_ & 0x00000080) == 0x00000080)) {
5583                  subBuilder = xAttrs_.toBuilder();
5584                }
5585                xAttrs_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.PARSER, extensionRegistry);
5586                if (subBuilder != null) {
5587                  subBuilder.mergeFrom(xAttrs_);
5588                  xAttrs_ = subBuilder.buildPartial();
5589                }
5590                bitField0_ |= 0x00000080;
5591                break;
5592              }
5593              case 80: {
5594                bitField0_ |= 0x00000100;
5595                storagePolicyID_ = input.readUInt32();
5596                break;
5597              }
5598            }
5599          }
5600        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
5601          throw e.setUnfinishedMessage(this);
5602        } catch (java.io.IOException e) {
5603          throw new com.google.protobuf.InvalidProtocolBufferException(
5604              e.getMessage()).setUnfinishedMessage(this);
5605        } finally {
5606          if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
5607            blocks_ = java.util.Collections.unmodifiableList(blocks_);
5608          }
5609          this.unknownFields = unknownFields.build();
5610          makeExtensionsImmutable();
5611        }
5612      }
5613      public static final com.google.protobuf.Descriptors.Descriptor
5614          getDescriptor() {
5615        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_descriptor;
5616      }
5617
5618      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5619          internalGetFieldAccessorTable() {
5620        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_fieldAccessorTable
5621            .ensureFieldAccessorsInitialized(
5622                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder.class);
5623      }
5624
5625      public static com.google.protobuf.Parser<INodeFile> PARSER =
5626          new com.google.protobuf.AbstractParser<INodeFile>() {
5627        public INodeFile parsePartialFrom(
5628            com.google.protobuf.CodedInputStream input,
5629            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5630            throws com.google.protobuf.InvalidProtocolBufferException {
5631          return new INodeFile(input, extensionRegistry);
5632        }
5633      };
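
      // Parsing sketch (illustrative only; the byte[] source is hypothetical):
      //   INodeFile file = INodeFile.parseFrom(bytes);
      //   if (file.hasReplication()) {
      //     int replication = file.getReplication();
      //   }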
5634
5635      @java.lang.Override
5636      public com.google.protobuf.Parser<INodeFile> getParserForType() {
5637        return PARSER;
5638      }
5639
5640      private int bitField0_;
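      // One bit in bitField0_ tracks the presence of each optional field; the
      // hasXxx() accessors below simply test the corresponding bit.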
5641      // optional uint32 replication = 1;
5642      public static final int REPLICATION_FIELD_NUMBER = 1;
5643      private int replication_;
5644      /**
5645       * <code>optional uint32 replication = 1;</code>
5646       */
5647      public boolean hasReplication() {
5648        return ((bitField0_ & 0x00000001) == 0x00000001);
5649      }
5650      /**
5651       * <code>optional uint32 replication = 1;</code>
5652       */
5653      public int getReplication() {
5654        return replication_;
5655      }
5656
5657      // optional uint64 modificationTime = 2;
5658      public static final int MODIFICATIONTIME_FIELD_NUMBER = 2;
5659      private long modificationTime_;
5660      /**
5661       * <code>optional uint64 modificationTime = 2;</code>
5662       */
5663      public boolean hasModificationTime() {
5664        return ((bitField0_ & 0x00000002) == 0x00000002);
5665      }
5666      /**
5667       * <code>optional uint64 modificationTime = 2;</code>
5668       */
5669      public long getModificationTime() {
5670        return modificationTime_;
5671      }
5672
5673      // optional uint64 accessTime = 3;
5674      public static final int ACCESSTIME_FIELD_NUMBER = 3;
5675      private long accessTime_;
5676      /**
5677       * <code>optional uint64 accessTime = 3;</code>
5678       */
5679      public boolean hasAccessTime() {
5680        return ((bitField0_ & 0x00000004) == 0x00000004);
5681      }
5682      /**
5683       * <code>optional uint64 accessTime = 3;</code>
5684       */
5685      public long getAccessTime() {
5686        return accessTime_;
5687      }
5688
5689      // optional uint64 preferredBlockSize = 4;
5690      public static final int PREFERREDBLOCKSIZE_FIELD_NUMBER = 4;
5691      private long preferredBlockSize_;
5692      /**
5693       * <code>optional uint64 preferredBlockSize = 4;</code>
5694       */
5695      public boolean hasPreferredBlockSize() {
5696        return ((bitField0_ & 0x00000008) == 0x00000008);
5697      }
5698      /**
5699       * <code>optional uint64 preferredBlockSize = 4;</code>
5700       */
5701      public long getPreferredBlockSize() {
5702        return preferredBlockSize_;
5703      }
5704
5705      // optional fixed64 permission = 5;
5706      public static final int PERMISSION_FIELD_NUMBER = 5;
5707      private long permission_;
5708      /**
5709       * <code>optional fixed64 permission = 5;</code>
5710       */
5711      public boolean hasPermission() {
5712        return ((bitField0_ & 0x00000010) == 0x00000010);
5713      }
5714      /**
5715       * <code>optional fixed64 permission = 5;</code>
5716       */
5717      public long getPermission() {
5718        return permission_;
5719      }
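      // Note: HDFS packs a PermissionStatus into this fixed64 (roughly: the
      // POSIX mode bits in the low 16 bits, with string-table ids for group
      // and user above them -- see FSImageFormatPBINode); this message only
      // transports the raw long.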
5720
5721      // repeated .hadoop.hdfs.BlockProto blocks = 6;
5722      public static final int BLOCKS_FIELD_NUMBER = 6;
5723      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> blocks_;
5724      /**
5725       * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
5726       */
5727      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> getBlocksList() {
5728        return blocks_;
5729      }
5730      /**
5731       * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
5732       */
5733      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
5734          getBlocksOrBuilderList() {
5735        return blocks_;
5736      }
5737      /**
5738       * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
5739       */
5740      public int getBlocksCount() {
5741        return blocks_.size();
5742      }
5743      /**
5744       * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
5745       */
5746      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) {
5747        return blocks_.get(index);
5748      }
5749      /**
5750       * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
5751       */
5752      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
5753          int index) {
5754        return blocks_.get(index);
5755      }
5756
5757      // optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;
5758      public static final int FILEUC_FIELD_NUMBER = 7;
5759      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature fileUC_;
5760      /**
5761       * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
5762       */
5763      public boolean hasFileUC() {
5764        return ((bitField0_ & 0x00000020) == 0x00000020);
5765      }
5766      /**
5767       * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
5768       */
5769      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature getFileUC() {
5770        return fileUC_;
5771      }
5772      /**
5773       * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
5774       */
5775      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeatureOrBuilder getFileUCOrBuilder() {
5776        return fileUC_;
5777      }
5778
5779      // optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;
5780      public static final int ACL_FIELD_NUMBER = 8;
5781      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto acl_;
5782      /**
5783       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
5784       */
5785      public boolean hasAcl() {
5786        return ((bitField0_ & 0x00000040) == 0x00000040);
5787      }
5788      /**
5789       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
5790       */
5791      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getAcl() {
5792        return acl_;
5793      }
5794      /**
5795       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
5796       */
5797      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder getAclOrBuilder() {
5798        return acl_;
5799      }
5800
5801      // optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;
5802      public static final int XATTRS_FIELD_NUMBER = 9;
5803      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto xAttrs_;
5804      /**
5805       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
5806       */
5807      public boolean hasXAttrs() {
5808        return ((bitField0_ & 0x00000080) == 0x00000080);
5809      }
5810      /**
5811       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
5812       */
5813      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getXAttrs() {
5814        return xAttrs_;
5815      }
5816      /**
5817       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
5818       */
5819      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder getXAttrsOrBuilder() {
5820        return xAttrs_;
5821      }
5822
5823      // optional uint32 storagePolicyID = 10;
5824      public static final int STORAGEPOLICYID_FIELD_NUMBER = 10;
5825      private int storagePolicyID_;
5826      /**
5827       * <code>optional uint32 storagePolicyID = 10;</code>
5828       */
5829      public boolean hasStoragePolicyID() {
5830        return ((bitField0_ & 0x00000100) == 0x00000100);
5831      }
5832      /**
5833       * <code>optional uint32 storagePolicyID = 10;</code>
5834       */
5835      public int getStoragePolicyID() {
5836        return storagePolicyID_;
5837      }
5838
5839      private void initFields() {
5840        replication_ = 0;
5841        modificationTime_ = 0L;
5842        accessTime_ = 0L;
5843        preferredBlockSize_ = 0L;
5844        permission_ = 0L;
5845        blocks_ = java.util.Collections.emptyList();
5846        fileUC_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.getDefaultInstance();
5847        acl_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance();
5848        xAttrs_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance();
5849        storagePolicyID_ = 0;
5850      }
5851      private byte memoizedIsInitialized = -1;
5852      public final boolean isInitialized() {
5853        byte isInitialized = memoizedIsInitialized;
5854        if (isInitialized != -1) return isInitialized == 1;
5855
5856        for (int i = 0; i < getBlocksCount(); i++) {
5857          if (!getBlocks(i).isInitialized()) {
5858            memoizedIsInitialized = 0;
5859            return false;
5860          }
5861        }
5862        if (hasXAttrs()) {
5863          if (!getXAttrs().isInitialized()) {
5864            memoizedIsInitialized = 0;
5865            return false;
5866          }
5867        }
5868        memoizedIsInitialized = 1;
5869        return true;
5870      }
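      // Every field of INodeFile itself is optional, so initialization hinges
      // only on required fields inside the nested blocks and xAttrs messages.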
5871
5872      public void writeTo(com.google.protobuf.CodedOutputStream output)
5873                          throws java.io.IOException {
5874        getSerializedSize();
5875        if (((bitField0_ & 0x00000001) == 0x00000001)) {
5876          output.writeUInt32(1, replication_);
5877        }
5878        if (((bitField0_ & 0x00000002) == 0x00000002)) {
5879          output.writeUInt64(2, modificationTime_);
5880        }
5881        if (((bitField0_ & 0x00000004) == 0x00000004)) {
5882          output.writeUInt64(3, accessTime_);
5883        }
5884        if (((bitField0_ & 0x00000008) == 0x00000008)) {
5885          output.writeUInt64(4, preferredBlockSize_);
5886        }
5887        if (((bitField0_ & 0x00000010) == 0x00000010)) {
5888          output.writeFixed64(5, permission_);
5889        }
5890        for (int i = 0; i < blocks_.size(); i++) {
5891          output.writeMessage(6, blocks_.get(i));
5892        }
5893        if (((bitField0_ & 0x00000020) == 0x00000020)) {
5894          output.writeMessage(7, fileUC_);
5895        }
5896        if (((bitField0_ & 0x00000040) == 0x00000040)) {
5897          output.writeMessage(8, acl_);
5898        }
5899        if (((bitField0_ & 0x00000080) == 0x00000080)) {
5900          output.writeMessage(9, xAttrs_);
5901        }
5902        if (((bitField0_ & 0x00000100) == 0x00000100)) {
5903          output.writeUInt32(10, storagePolicyID_);
5904        }
5905        getUnknownFields().writeTo(output);
5906      }
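      // Fields are written in ascending field-number order, and unknown
      // fields captured from a newer writer are re-emitted, so a
      // parse/serialize round trip is lossless.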
5907
5908      private int memoizedSerializedSize = -1;
5909      public int getSerializedSize() {
5910        int size = memoizedSerializedSize;
5911        if (size != -1) return size;
5912
5913        size = 0;
5914        if (((bitField0_ & 0x00000001) == 0x00000001)) {
5915          size += com.google.protobuf.CodedOutputStream
5916            .computeUInt32Size(1, replication_);
5917        }
5918        if (((bitField0_ & 0x00000002) == 0x00000002)) {
5919          size += com.google.protobuf.CodedOutputStream
5920            .computeUInt64Size(2, modificationTime_);
5921        }
5922        if (((bitField0_ & 0x00000004) == 0x00000004)) {
5923          size += com.google.protobuf.CodedOutputStream
5924            .computeUInt64Size(3, accessTime_);
5925        }
5926        if (((bitField0_ & 0x00000008) == 0x00000008)) {
5927          size += com.google.protobuf.CodedOutputStream
5928            .computeUInt64Size(4, preferredBlockSize_);
5929        }
5930        if (((bitField0_ & 0x00000010) == 0x00000010)) {
5931          size += com.google.protobuf.CodedOutputStream
5932            .computeFixed64Size(5, permission_);
5933        }
5934        for (int i = 0; i < blocks_.size(); i++) {
5935          size += com.google.protobuf.CodedOutputStream
5936            .computeMessageSize(6, blocks_.get(i));
5937        }
5938        if (((bitField0_ & 0x00000020) == 0x00000020)) {
5939          size += com.google.protobuf.CodedOutputStream
5940            .computeMessageSize(7, fileUC_);
5941        }
5942        if (((bitField0_ & 0x00000040) == 0x00000040)) {
5943          size += com.google.protobuf.CodedOutputStream
5944            .computeMessageSize(8, acl_);
5945        }
5946        if (((bitField0_ & 0x00000080) == 0x00000080)) {
5947          size += com.google.protobuf.CodedOutputStream
5948            .computeMessageSize(9, xAttrs_);
5949        }
5950        if (((bitField0_ & 0x00000100) == 0x00000100)) {
5951          size += com.google.protobuf.CodedOutputStream
5952            .computeUInt32Size(10, storagePolicyID_);
5953        }
5954        size += getUnknownFields().getSerializedSize();
5955        memoizedSerializedSize = size;
5956        return size;
5957      }
5958
5959      private static final long serialVersionUID = 0L;
5960      @java.lang.Override
5961      protected java.lang.Object writeReplace()
5962          throws java.io.ObjectStreamException {
5963        return super.writeReplace();
5964      }
5965
5966      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom(
5967          com.google.protobuf.ByteString data)
5968          throws com.google.protobuf.InvalidProtocolBufferException {
5969        return PARSER.parseFrom(data);
5970      }
5971      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom(
5972          com.google.protobuf.ByteString data,
5973          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5974          throws com.google.protobuf.InvalidProtocolBufferException {
5975        return PARSER.parseFrom(data, extensionRegistry);
5976      }
5977      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom(byte[] data)
5978          throws com.google.protobuf.InvalidProtocolBufferException {
5979        return PARSER.parseFrom(data);
5980      }
5981      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom(
5982          byte[] data,
5983          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5984          throws com.google.protobuf.InvalidProtocolBufferException {
5985        return PARSER.parseFrom(data, extensionRegistry);
5986      }
5987      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom(java.io.InputStream input)
5988          throws java.io.IOException {
5989        return PARSER.parseFrom(input);
5990      }
5991      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom(
5992          java.io.InputStream input,
5993          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5994          throws java.io.IOException {
5995        return PARSER.parseFrom(input, extensionRegistry);
5996      }
5997      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseDelimitedFrom(java.io.InputStream input)
5998          throws java.io.IOException {
5999        return PARSER.parseDelimitedFrom(input);
6000      }
6001      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseDelimitedFrom(
6002          java.io.InputStream input,
6003          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6004          throws java.io.IOException {
6005        return PARSER.parseDelimitedFrom(input, extensionRegistry);
6006      }
6007      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom(
6008          com.google.protobuf.CodedInputStream input)
6009          throws java.io.IOException {
6010        return PARSER.parseFrom(input);
6011      }
6012      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom(
6013          com.google.protobuf.CodedInputStream input,
6014          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6015          throws java.io.IOException {
6016        return PARSER.parseFrom(input, extensionRegistry);
6017      }
6018
6019      public static Builder newBuilder() { return Builder.create(); }
6020      public Builder newBuilderForType() { return newBuilder(); }
6021      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile prototype) {
6022        return newBuilder().mergeFrom(prototype);
6023      }
6024      public Builder toBuilder() { return newBuilder(this); }
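
      // Builder usage sketch (field values are hypothetical):
      //   INodeFile file = INodeFile.newBuilder()
      //       .setReplication(3)
      //       .setPreferredBlockSize(128L * 1024 * 1024)
      //       .build();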
6025
6026      @java.lang.Override
6027      protected Builder newBuilderForType(
6028          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
6029        Builder builder = new Builder(parent);
6030        return builder;
6031      }
6032      /**
6033       * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INodeFile}
6034       */
6035      public static final class Builder extends
6036          com.google.protobuf.GeneratedMessage.Builder<Builder>
6037         implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder {
6038        public static final com.google.protobuf.Descriptors.Descriptor
6039            getDescriptor() {
6040          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_descriptor;
6041        }
6042
6043        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
6044            internalGetFieldAccessorTable() {
6045          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_fieldAccessorTable
6046              .ensureFieldAccessorsInitialized(
6047                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder.class);
6048        }
6049
6050        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.newBuilder()
6051        private Builder() {
6052          maybeForceBuilderInitialization();
6053        }
6054
6055        private Builder(
6056            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
6057          super(parent);
6058          maybeForceBuilderInitialization();
6059        }
6060        private void maybeForceBuilderInitialization() {
6061          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
6062            getBlocksFieldBuilder();
6063            getFileUCFieldBuilder();
6064            getAclFieldBuilder();
6065            getXAttrsFieldBuilder();
6066          }
6067        }
6068        private static Builder create() {
6069          return new Builder();
6070        }
6071
6072        public Builder clear() {
6073          super.clear();
6074          replication_ = 0;
6075          bitField0_ = (bitField0_ & ~0x00000001);
6076          modificationTime_ = 0L;
6077          bitField0_ = (bitField0_ & ~0x00000002);
6078          accessTime_ = 0L;
6079          bitField0_ = (bitField0_ & ~0x00000004);
6080          preferredBlockSize_ = 0L;
6081          bitField0_ = (bitField0_ & ~0x00000008);
6082          permission_ = 0L;
6083          bitField0_ = (bitField0_ & ~0x00000010);
6084          if (blocksBuilder_ == null) {
6085            blocks_ = java.util.Collections.emptyList();
6086            bitField0_ = (bitField0_ & ~0x00000020);
6087          } else {
6088            blocksBuilder_.clear();
6089          }
6090          if (fileUCBuilder_ == null) {
6091            fileUC_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.getDefaultInstance();
6092          } else {
6093            fileUCBuilder_.clear();
6094          }
6095          bitField0_ = (bitField0_ & ~0x00000040);
6096          if (aclBuilder_ == null) {
6097            acl_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance();
6098          } else {
6099            aclBuilder_.clear();
6100          }
6101          bitField0_ = (bitField0_ & ~0x00000080);
6102          if (xAttrsBuilder_ == null) {
6103            xAttrs_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance();
6104          } else {
6105            xAttrsBuilder_.clear();
6106          }
6107          bitField0_ = (bitField0_ & ~0x00000100);
6108          storagePolicyID_ = 0;
6109          bitField0_ = (bitField0_ & ~0x00000200);
6110          return this;
6111        }
6112
6113        public Builder clone() {
6114          return create().mergeFrom(buildPartial());
6115        }
6116
6117        public com.google.protobuf.Descriptors.Descriptor
6118            getDescriptorForType() {
6119          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_descriptor;
6120        }
6121
6122        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getDefaultInstanceForType() {
6123          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
6124        }
6125
6126        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile build() {
6127          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile result = buildPartial();
6128          if (!result.isInitialized()) {
6129            throw newUninitializedMessageException(result);
6130          }
6131          return result;
6132        }
6133
6134        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile buildPartial() {
6135          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile(this);
6136          int from_bitField0_ = bitField0_;
6137          int to_bitField0_ = 0;
6138          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
6139            to_bitField0_ |= 0x00000001;
6140          }
6141          result.replication_ = replication_;
6142          if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
6143            to_bitField0_ |= 0x00000002;
6144          }
6145          result.modificationTime_ = modificationTime_;
6146          if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
6147            to_bitField0_ |= 0x00000004;
6148          }
6149          result.accessTime_ = accessTime_;
6150          if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
6151            to_bitField0_ |= 0x00000008;
6152          }
6153          result.preferredBlockSize_ = preferredBlockSize_;
6154          if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
6155            to_bitField0_ |= 0x00000010;
6156          }
6157          result.permission_ = permission_;
6158          if (blocksBuilder_ == null) {
6159            if (((bitField0_ & 0x00000020) == 0x00000020)) {
6160              blocks_ = java.util.Collections.unmodifiableList(blocks_);
6161              bitField0_ = (bitField0_ & ~0x00000020);
6162            }
6163            result.blocks_ = blocks_;
6164          } else {
6165            result.blocks_ = blocksBuilder_.build();
6166          }
6167          if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
6168            to_bitField0_ |= 0x00000020;
6169          }
6170          if (fileUCBuilder_ == null) {
6171            result.fileUC_ = fileUC_;
6172          } else {
6173            result.fileUC_ = fileUCBuilder_.build();
6174          }
6175          if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
6176            to_bitField0_ |= 0x00000040;
6177          }
6178          if (aclBuilder_ == null) {
6179            result.acl_ = acl_;
6180          } else {
6181            result.acl_ = aclBuilder_.build();
6182          }
6183          if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
6184            to_bitField0_ |= 0x00000080;
6185          }
6186          if (xAttrsBuilder_ == null) {
6187            result.xAttrs_ = xAttrs_;
6188          } else {
6189            result.xAttrs_ = xAttrsBuilder_.build();
6190          }
6191          if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
6192            to_bitField0_ |= 0x00000100;
6193          }
6194          result.storagePolicyID_ = storagePolicyID_;
6195          result.bitField0_ = to_bitField0_;
6196          onBuilt();
6197          return result;
6198        }
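        // Note the bit remapping above: the builder uses bit 0x20 for the
        // repeated blocks field, so the message-side presence bits for fileUC,
        // acl, xAttrs and storagePolicyID each sit one position lower than
        // their builder-side counterparts.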
6199
6200        public Builder mergeFrom(com.google.protobuf.Message other) {
6201          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile) {
6202            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile)other);
6203          } else {
6204            super.mergeFrom(other);
6205            return this;
6206          }
6207        }
6208
6209        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile other) {
6210          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance()) return this;
6211          if (other.hasReplication()) {
6212            setReplication(other.getReplication());
6213          }
6214          if (other.hasModificationTime()) {
6215            setModificationTime(other.getModificationTime());
6216          }
6217          if (other.hasAccessTime()) {
6218            setAccessTime(other.getAccessTime());
6219          }
6220          if (other.hasPreferredBlockSize()) {
6221            setPreferredBlockSize(other.getPreferredBlockSize());
6222          }
6223          if (other.hasPermission()) {
6224            setPermission(other.getPermission());
6225          }
6226          if (blocksBuilder_ == null) {
6227            if (!other.blocks_.isEmpty()) {
6228              if (blocks_.isEmpty()) {
6229                blocks_ = other.blocks_;
6230                bitField0_ = (bitField0_ & ~0x00000020);
6231              } else {
6232                ensureBlocksIsMutable();
6233                blocks_.addAll(other.blocks_);
6234              }
6235              onChanged();
6236            }
6237          } else {
6238            if (!other.blocks_.isEmpty()) {
6239              if (blocksBuilder_.isEmpty()) {
6240                blocksBuilder_.dispose();
6241                blocksBuilder_ = null;
6242                blocks_ = other.blocks_;
6243                bitField0_ = (bitField0_ & ~0x00000020);
6244                blocksBuilder_ = 
6245                  com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
6246                     getBlocksFieldBuilder() : null;
6247              } else {
6248                blocksBuilder_.addAllMessages(other.blocks_);
6249              }
6250            }
6251          }
6252          if (other.hasFileUC()) {
6253            mergeFileUC(other.getFileUC());
6254          }
6255          if (other.hasAcl()) {
6256            mergeAcl(other.getAcl());
6257          }
6258          if (other.hasXAttrs()) {
6259            mergeXAttrs(other.getXAttrs());
6260          }
6261          if (other.hasStoragePolicyID()) {
6262            setStoragePolicyID(other.getStoragePolicyID());
6263          }
6264          this.mergeUnknownFields(other.getUnknownFields());
6265          return this;
6266        }
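        // While this builder's block list is empty it may alias the other
        // message's immutable list; ensureBlocksIsMutable() copies it before
        // any local mutation.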
6267
6268        public final boolean isInitialized() {
6269          for (int i = 0; i < getBlocksCount(); i++) {
            if (!getBlocks(i).isInitialized()) {
              return false;
            }
6274          }
6275          if (hasXAttrs()) {
            if (!getXAttrs().isInitialized()) {
              return false;
            }
6280          }
6281          return true;
6282        }
6283
6284        public Builder mergeFrom(
6285            com.google.protobuf.CodedInputStream input,
6286            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6287            throws java.io.IOException {
6288          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parsedMessage = null;
6289          try {
6290            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
6291          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
6292            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile) e.getUnfinishedMessage();
6293            throw e;
6294          } finally {
6295            if (parsedMessage != null) {
6296              mergeFrom(parsedMessage);
6297            }
6298          }
6299          return this;
6300        }
6301        private int bitField0_;
6302
6303        // optional uint32 replication = 1;
6304        private int replication_ ;
6305        /**
6306         * <code>optional uint32 replication = 1;</code>
6307         */
6308        public boolean hasReplication() {
6309          return ((bitField0_ & 0x00000001) == 0x00000001);
6310        }
6311        /**
6312         * <code>optional uint32 replication = 1;</code>
6313         */
6314        public int getReplication() {
6315          return replication_;
6316        }
6317        /**
6318         * <code>optional uint32 replication = 1;</code>
6319         */
6320        public Builder setReplication(int value) {
6321          bitField0_ |= 0x00000001;
6322          replication_ = value;
6323          onChanged();
6324          return this;
6325        }
6326        /**
6327         * <code>optional uint32 replication = 1;</code>
6328         */
6329        public Builder clearReplication() {
6330          bitField0_ = (bitField0_ & ~0x00000001);
6331          replication_ = 0;
6332          onChanged();
6333          return this;
6334        }
6335
6336        // optional uint64 modificationTime = 2;
6337        private long modificationTime_ ;
6338        /**
6339         * <code>optional uint64 modificationTime = 2;</code>
6340         */
6341        public boolean hasModificationTime() {
6342          return ((bitField0_ & 0x00000002) == 0x00000002);
6343        }
6344        /**
6345         * <code>optional uint64 modificationTime = 2;</code>
6346         */
6347        public long getModificationTime() {
6348          return modificationTime_;
6349        }
6350        /**
6351         * <code>optional uint64 modificationTime = 2;</code>
6352         */
6353        public Builder setModificationTime(long value) {
6354          bitField0_ |= 0x00000002;
6355          modificationTime_ = value;
6356          onChanged();
6357          return this;
6358        }
6359        /**
6360         * <code>optional uint64 modificationTime = 2;</code>
6361         */
6362        public Builder clearModificationTime() {
6363          bitField0_ = (bitField0_ & ~0x00000002);
6364          modificationTime_ = 0L;
6365          onChanged();
6366          return this;
6367        }
6368
6369        // optional uint64 accessTime = 3;
6370        private long accessTime_ ;
6371        /**
6372         * <code>optional uint64 accessTime = 3;</code>
6373         */
6374        public boolean hasAccessTime() {
6375          return ((bitField0_ & 0x00000004) == 0x00000004);
6376        }
6377        /**
6378         * <code>optional uint64 accessTime = 3;</code>
6379         */
6380        public long getAccessTime() {
6381          return accessTime_;
6382        }
6383        /**
6384         * <code>optional uint64 accessTime = 3;</code>
6385         */
6386        public Builder setAccessTime(long value) {
6387          bitField0_ |= 0x00000004;
6388          accessTime_ = value;
6389          onChanged();
6390          return this;
6391        }
6392        /**
6393         * <code>optional uint64 accessTime = 3;</code>
6394         */
6395        public Builder clearAccessTime() {
6396          bitField0_ = (bitField0_ & ~0x00000004);
6397          accessTime_ = 0L;
6398          onChanged();
6399          return this;
6400        }
6401
6402        // optional uint64 preferredBlockSize = 4;
6403        private long preferredBlockSize_ ;
6404        /**
6405         * <code>optional uint64 preferredBlockSize = 4;</code>
6406         */
6407        public boolean hasPreferredBlockSize() {
6408          return ((bitField0_ & 0x00000008) == 0x00000008);
6409        }
6410        /**
6411         * <code>optional uint64 preferredBlockSize = 4;</code>
6412         */
6413        public long getPreferredBlockSize() {
6414          return preferredBlockSize_;
6415        }
6416        /**
6417         * <code>optional uint64 preferredBlockSize = 4;</code>
6418         */
6419        public Builder setPreferredBlockSize(long value) {
6420          bitField0_ |= 0x00000008;
6421          preferredBlockSize_ = value;
6422          onChanged();
6423          return this;
6424        }
6425        /**
6426         * <code>optional uint64 preferredBlockSize = 4;</code>
6427         */
6428        public Builder clearPreferredBlockSize() {
6429          bitField0_ = (bitField0_ & ~0x00000008);
6430          preferredBlockSize_ = 0L;
6431          onChanged();
6432          return this;
6433        }
6434
6435        // optional fixed64 permission = 5;
6436        private long permission_ ;
6437        /**
6438         * <code>optional fixed64 permission = 5;</code>
6439         */
6440        public boolean hasPermission() {
6441          return ((bitField0_ & 0x00000010) == 0x00000010);
6442        }
6443        /**
6444         * <code>optional fixed64 permission = 5;</code>
6445         */
6446        public long getPermission() {
6447          return permission_;
6448        }
6449        /**
6450         * <code>optional fixed64 permission = 5;</code>
6451         */
6452        public Builder setPermission(long value) {
6453          bitField0_ |= 0x00000010;
6454          permission_ = value;
6455          onChanged();
6456          return this;
6457        }
6458        /**
6459         * <code>optional fixed64 permission = 5;</code>
6460         */
6461        public Builder clearPermission() {
6462          bitField0_ = (bitField0_ & ~0x00000010);
6463          permission_ = 0L;
6464          onChanged();
6465          return this;
6466        }
6467
6468        // repeated .hadoop.hdfs.BlockProto blocks = 6;
6469        private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> blocks_ =
6470          java.util.Collections.emptyList();
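        // Copy-on-write: the list starts as a shared immutable emptyList()
        // and is copied into a fresh ArrayList the first time it is mutated.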
6471        private void ensureBlocksIsMutable() {
6472          if (!((bitField0_ & 0x00000020) == 0x00000020)) {
6473            blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto>(blocks_);
6474            bitField0_ |= 0x00000020;
          }
6476        }
6477
6478        private com.google.protobuf.RepeatedFieldBuilder<
6479            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blocksBuilder_;
6480
6481        /**
6482         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
6483         */
6484        public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> getBlocksList() {
6485          if (blocksBuilder_ == null) {
6486            return java.util.Collections.unmodifiableList(blocks_);
6487          } else {
6488            return blocksBuilder_.getMessageList();
6489          }
6490        }
6491        /**
6492         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
6493         */
6494        public int getBlocksCount() {
6495          if (blocksBuilder_ == null) {
6496            return blocks_.size();
6497          } else {
6498            return blocksBuilder_.getCount();
6499          }
6500        }
6501        /**
6502         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
6503         */
6504        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) {
6505          if (blocksBuilder_ == null) {
6506            return blocks_.get(index);
6507          } else {
6508            return blocksBuilder_.getMessage(index);
6509          }
6510        }
6511        /**
6512         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
6513         */
6514        public Builder setBlocks(
6515            int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
6516          if (blocksBuilder_ == null) {
6517            if (value == null) {
6518              throw new NullPointerException();
6519            }
6520            ensureBlocksIsMutable();
6521            blocks_.set(index, value);
6522            onChanged();
6523          } else {
6524            blocksBuilder_.setMessage(index, value);
6525          }
6526          return this;
6527        }
6528        /**
6529         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
6530         */
6531        public Builder setBlocks(
6532            int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
6533          if (blocksBuilder_ == null) {
6534            ensureBlocksIsMutable();
6535            blocks_.set(index, builderForValue.build());
6536            onChanged();
6537          } else {
6538            blocksBuilder_.setMessage(index, builderForValue.build());
6539          }
6540          return this;
6541        }
6542        /**
6543         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
6544         */
6545        public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
6546          if (blocksBuilder_ == null) {
6547            if (value == null) {
6548              throw new NullPointerException();
6549            }
6550            ensureBlocksIsMutable();
6551            blocks_.add(value);
6552            onChanged();
6553          } else {
6554            blocksBuilder_.addMessage(value);
6555          }
6556          return this;
6557        }
6558        /**
6559         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
6560         */
6561        public Builder addBlocks(
6562            int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
6563          if (blocksBuilder_ == null) {
6564            if (value == null) {
6565              throw new NullPointerException();
6566            }
6567            ensureBlocksIsMutable();
6568            blocks_.add(index, value);
6569            onChanged();
6570          } else {
6571            blocksBuilder_.addMessage(index, value);
6572          }
6573          return this;
6574        }
6575        /**
6576         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
6577         */
6578        public Builder addBlocks(
6579            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
6580          if (blocksBuilder_ == null) {
6581            ensureBlocksIsMutable();
6582            blocks_.add(builderForValue.build());
6583            onChanged();
6584          } else {
6585            blocksBuilder_.addMessage(builderForValue.build());
6586          }
6587          return this;
6588        }
6589        /**
6590         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
6591         */
6592        public Builder addBlocks(
6593            int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
6594          if (blocksBuilder_ == null) {
6595            ensureBlocksIsMutable();
6596            blocks_.add(index, builderForValue.build());
6597            onChanged();
6598          } else {
6599            blocksBuilder_.addMessage(index, builderForValue.build());
6600          }
6601          return this;
6602        }
6603        /**
6604         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
6605         */
6606        public Builder addAllBlocks(
6607            java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> values) {
6608          if (blocksBuilder_ == null) {
6609            ensureBlocksIsMutable();
6610            super.addAll(values, blocks_);
6611            onChanged();
6612          } else {
6613            blocksBuilder_.addAllMessages(values);
6614          }
6615          return this;
6616        }
6617        /**
6618         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
6619         */
6620        public Builder clearBlocks() {
6621          if (blocksBuilder_ == null) {
6622            blocks_ = java.util.Collections.emptyList();
6623            bitField0_ = (bitField0_ & ~0x00000020);
6624            onChanged();
6625          } else {
6626            blocksBuilder_.clear();
6627          }
6628          return this;
6629        }
6630        /**
6631         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
6632         */
6633        public Builder removeBlocks(int index) {
6634          if (blocksBuilder_ == null) {
6635            ensureBlocksIsMutable();
6636            blocks_.remove(index);
6637            onChanged();
6638          } else {
6639            blocksBuilder_.remove(index);
6640          }
6641          return this;
6642        }
6643        /**
6644         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
6645         */
6646        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlocksBuilder(
6647            int index) {
6648          return getBlocksFieldBuilder().getBuilder(index);
6649        }
6650        /**
6651         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
6652         */
6653        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
6654            int index) {
6655          if (blocksBuilder_ == null) {
            return blocks_.get(index);
          } else {
6657            return blocksBuilder_.getMessageOrBuilder(index);
6658          }
6659        }
6660        /**
6661         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
6662         */
6663        public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
6664             getBlocksOrBuilderList() {
6665          if (blocksBuilder_ != null) {
6666            return blocksBuilder_.getMessageOrBuilderList();
6667          } else {
6668            return java.util.Collections.unmodifiableList(blocks_);
6669          }
6670        }
6671        /**
6672         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
6673         */
6674        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder() {
6675          return getBlocksFieldBuilder().addBuilder(
6676              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance());
6677        }
6678        /**
6679         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
6680         */
6681        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder(
6682            int index) {
6683          return getBlocksFieldBuilder().addBuilder(
6684              index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance());
6685        }
6686        /**
6687         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
6688         */
6689        public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder> 
6690             getBlocksBuilderList() {
6691          return getBlocksFieldBuilder().getBuilderList();
6692        }
6693        private com.google.protobuf.RepeatedFieldBuilder<
6694            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
6695            getBlocksFieldBuilder() {
6696          if (blocksBuilder_ == null) {
6697            blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
6698                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>(
6699                    blocks_,
6700                    ((bitField0_ & 0x00000020) == 0x00000020),
6701                    getParentForChildren(),
6702                    isClean());
6703            blocks_ = null;
6704          }
6705          return blocksBuilder_;
6706        }
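        // The RepeatedFieldBuilder is created lazily; once built it takes
        // ownership of the element list and blocks_ is nulled so that all
        // further access flows through the builder.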
6707
6708        // optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;
6709        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature fileUC_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.getDefaultInstance();
6710        private com.google.protobuf.SingleFieldBuilder<
6711            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeatureOrBuilder> fileUCBuilder_;
6712        /**
6713         * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
6714         */
6715        public boolean hasFileUC() {
6716          return ((bitField0_ & 0x00000040) == 0x00000040);
6717        }
6718        /**
6719         * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
6720         */
6721        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature getFileUC() {
6722          if (fileUCBuilder_ == null) {
6723            return fileUC_;
6724          } else {
6725            return fileUCBuilder_.getMessage();
6726          }
6727        }
6728        /**
6729         * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
6730         */
6731        public Builder setFileUC(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature value) {
6732          if (fileUCBuilder_ == null) {
6733            if (value == null) {
6734              throw new NullPointerException();
6735            }
6736            fileUC_ = value;
6737            onChanged();
6738          } else {
6739            fileUCBuilder_.setMessage(value);
6740          }
6741          bitField0_ |= 0x00000040;
6742          return this;
6743        }
6744        /**
6745         * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
6746         */
6747        public Builder setFileUC(
6748            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder builderForValue) {
6749          if (fileUCBuilder_ == null) {
6750            fileUC_ = builderForValue.build();
6751            onChanged();
6752          } else {
6753            fileUCBuilder_.setMessage(builderForValue.build());
6754          }
6755          bitField0_ |= 0x00000040;
6756          return this;
6757        }
6758        /**
6759         * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
6760         */
6761        public Builder mergeFileUC(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature value) {
6762          if (fileUCBuilder_ == null) {
6763            if (((bitField0_ & 0x00000040) == 0x00000040) &&
6764                fileUC_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.getDefaultInstance()) {
6765              fileUC_ =
6766                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.newBuilder(fileUC_).mergeFrom(value).buildPartial();
6767            } else {
6768              fileUC_ = value;
6769            }
6770            onChanged();
6771          } else {
6772            fileUCBuilder_.mergeFrom(value);
6773          }
6774          bitField0_ |= 0x00000040;
6775          return this;
6776        }
6777        /**
6778         * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
6779         */
6780        public Builder clearFileUC() {
6781          if (fileUCBuilder_ == null) {
6782            fileUC_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.getDefaultInstance();
6783            onChanged();
6784          } else {
6785            fileUCBuilder_.clear();
6786          }
6787          bitField0_ = (bitField0_ & ~0x00000040);
6788          return this;
6789        }
6790        /**
6791         * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
6792         */
6793        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder getFileUCBuilder() {
6794          bitField0_ |= 0x00000040;
6795          onChanged();
6796          return getFileUCFieldBuilder().getBuilder();
6797        }
6798        /**
6799         * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
6800         */
6801        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeatureOrBuilder getFileUCOrBuilder() {
6802          if (fileUCBuilder_ != null) {
6803            return fileUCBuilder_.getMessageOrBuilder();
6804          } else {
6805            return fileUC_;
6806          }
6807        }
6808        /**
6809         * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
6810         */
6811        private com.google.protobuf.SingleFieldBuilder<
6812            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeatureOrBuilder> 
6813            getFileUCFieldBuilder() {
6814          if (fileUCBuilder_ == null) {
6815            fileUCBuilder_ = new com.google.protobuf.SingleFieldBuilder<
6816                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeatureOrBuilder>(
6817                    fileUC_,
6818                    getParentForChildren(),
6819                    isClean());
6820            fileUC_ = null;
6821          }
6822          return fileUCBuilder_;
6823        }
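
        // The optional message fields fileUC, acl and xAttrs all share the
        // lazy SingleFieldBuilder pattern shown above: the value lives in the
        // plain field (fileUC_, acl_, xAttrs_) until a nested builder is
        // first requested, at which point ownership moves into the
        // SingleFieldBuilder and the direct reference is nulled out.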

        // optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;
        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto acl_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance();
        private com.google.protobuf.SingleFieldBuilder<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder> aclBuilder_;
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
         */
        public boolean hasAcl() {
          return ((bitField0_ & 0x00000080) == 0x00000080);
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getAcl() {
          if (aclBuilder_ == null) {
            return acl_;
          } else {
            return aclBuilder_.getMessage();
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
         */
        public Builder setAcl(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto value) {
          if (aclBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            acl_ = value;
            onChanged();
          } else {
            aclBuilder_.setMessage(value);
          }
          bitField0_ |= 0x00000080;
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
         */
        public Builder setAcl(
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder builderForValue) {
          if (aclBuilder_ == null) {
            acl_ = builderForValue.build();
            onChanged();
          } else {
            aclBuilder_.setMessage(builderForValue.build());
          }
          bitField0_ |= 0x00000080;
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
         */
        public Builder mergeAcl(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto value) {
          if (aclBuilder_ == null) {
            if (((bitField0_ & 0x00000080) == 0x00000080) &&
                acl_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance()) {
              acl_ =
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.newBuilder(acl_).mergeFrom(value).buildPartial();
            } else {
              acl_ = value;
            }
            onChanged();
          } else {
            aclBuilder_.mergeFrom(value);
          }
          bitField0_ |= 0x00000080;
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
         */
        public Builder clearAcl() {
          if (aclBuilder_ == null) {
            acl_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance();
            onChanged();
          } else {
            aclBuilder_.clear();
          }
          bitField0_ = (bitField0_ & ~0x00000080);
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder getAclBuilder() {
          bitField0_ |= 0x00000080;
          onChanged();
          return getAclFieldBuilder().getBuilder();
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder getAclOrBuilder() {
          if (aclBuilder_ != null) {
            return aclBuilder_.getMessageOrBuilder();
          } else {
            return acl_;
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
         */
        private com.google.protobuf.SingleFieldBuilder<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder> 
            getAclFieldBuilder() {
          if (aclBuilder_ == null) {
            aclBuilder_ = new com.google.protobuf.SingleFieldBuilder<
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder>(
                    acl_,
                    getParentForChildren(),
                    isClean());
            acl_ = null;
          }
          return aclBuilder_;
        }

        // optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;
        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto xAttrs_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance();
        private com.google.protobuf.SingleFieldBuilder<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder> xAttrsBuilder_;
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
         */
        public boolean hasXAttrs() {
          return ((bitField0_ & 0x00000100) == 0x00000100);
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getXAttrs() {
          if (xAttrsBuilder_ == null) {
            return xAttrs_;
          } else {
            return xAttrsBuilder_.getMessage();
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
         */
        public Builder setXAttrs(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto value) {
          if (xAttrsBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            xAttrs_ = value;
            onChanged();
          } else {
            xAttrsBuilder_.setMessage(value);
          }
          bitField0_ |= 0x00000100;
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
         */
        public Builder setXAttrs(
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder builderForValue) {
          if (xAttrsBuilder_ == null) {
            xAttrs_ = builderForValue.build();
            onChanged();
          } else {
            xAttrsBuilder_.setMessage(builderForValue.build());
          }
          bitField0_ |= 0x00000100;
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
         */
        public Builder mergeXAttrs(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto value) {
          if (xAttrsBuilder_ == null) {
            if (((bitField0_ & 0x00000100) == 0x00000100) &&
                xAttrs_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance()) {
              xAttrs_ =
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.newBuilder(xAttrs_).mergeFrom(value).buildPartial();
            } else {
              xAttrs_ = value;
            }
            onChanged();
          } else {
            xAttrsBuilder_.mergeFrom(value);
          }
          bitField0_ |= 0x00000100;
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
         */
        public Builder clearXAttrs() {
          if (xAttrsBuilder_ == null) {
            xAttrs_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance();
            onChanged();
          } else {
            xAttrsBuilder_.clear();
          }
          bitField0_ = (bitField0_ & ~0x00000100);
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder getXAttrsBuilder() {
          bitField0_ |= 0x00000100;
          onChanged();
          return getXAttrsFieldBuilder().getBuilder();
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder getXAttrsOrBuilder() {
          if (xAttrsBuilder_ != null) {
            return xAttrsBuilder_.getMessageOrBuilder();
          } else {
            return xAttrs_;
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
         */
        private com.google.protobuf.SingleFieldBuilder<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder> 
            getXAttrsFieldBuilder() {
          if (xAttrsBuilder_ == null) {
            xAttrsBuilder_ = new com.google.protobuf.SingleFieldBuilder<
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder>(
                    xAttrs_,
                    getParentForChildren(),
                    isClean());
            xAttrs_ = null;
          }
          return xAttrsBuilder_;
        }

        // optional uint32 storagePolicyID = 10;
        private int storagePolicyID_ ;
        /**
         * <code>optional uint32 storagePolicyID = 10;</code>
         */
        public boolean hasStoragePolicyID() {
          return ((bitField0_ & 0x00000200) == 0x00000200);
        }
        /**
         * <code>optional uint32 storagePolicyID = 10;</code>
         */
        public int getStoragePolicyID() {
          return storagePolicyID_;
        }
        /**
         * <code>optional uint32 storagePolicyID = 10;</code>
         */
        public Builder setStoragePolicyID(int value) {
          bitField0_ |= 0x00000200;
          storagePolicyID_ = value;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint32 storagePolicyID = 10;</code>
         */
        public Builder clearStoragePolicyID() {
          bitField0_ = (bitField0_ & ~0x00000200);
          storagePolicyID_ = 0;
          onChanged();
          return this;
        }
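
        // Presence tracking: each optional field owns one bit in bitField0_;
        // storagePolicyID (field 10) uses 0x00000200, matching the masks in
        // hasStoragePolicyID()/setStoragePolicyID()/clearStoragePolicyID()
        // above, and clearStoragePolicyID() drops the bit so the field reads
        // as unset again.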

        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.INodeFile)
      }

      static {
        defaultInstance = new INodeFile(true);
        defaultInstance.initFields();
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.INodeFile)
    }
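
    /*
     * Usage sketch (illustrative, not part of the generated code): wiring
     * the optional feature fields through the INodeFile.Builder methods
     * above. INodeFile.newBuilder() is assumed here as the standard
     * protoc-generated entry point.
     *
     *   INodeSection.INodeFile file = INodeSection.INodeFile.newBuilder()
     *       .setFileUC(INodeSection.FileUnderConstructionFeature
     *           .getDefaultInstance())       // sets presence bit 0x00000040
     *       .setAcl(INodeSection.AclFeatureProto.getDefaultInstance())
     *       .setStoragePolicyID(0)           // optional uint32, field 10
     *       .build();
     *   boolean uc = file.hasFileUC();       // true: the bit was set above
     */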

    public interface QuotaByStorageTypeEntryProtoOrBuilder
        extends com.google.protobuf.MessageOrBuilder {

      // required .hadoop.hdfs.StorageTypeProto storageType = 1;
      /**
       * <code>required .hadoop.hdfs.StorageTypeProto storageType = 1;</code>
       */
      boolean hasStorageType();
      /**
       * <code>required .hadoop.hdfs.StorageTypeProto storageType = 1;</code>
       */
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType();

      // required uint64 quota = 2;
      /**
       * <code>required uint64 quota = 2;</code>
       */
      boolean hasQuota();
      /**
       * <code>required uint64 quota = 2;</code>
       */
      long getQuota();
    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto}
     */
    public static final class QuotaByStorageTypeEntryProto extends
        com.google.protobuf.GeneratedMessage
        implements QuotaByStorageTypeEntryProtoOrBuilder {
      // Use QuotaByStorageTypeEntryProto.newBuilder() to construct.
      private QuotaByStorageTypeEntryProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
        super(builder);
        this.unknownFields = builder.getUnknownFields();
      }
      private QuotaByStorageTypeEntryProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

      private static final QuotaByStorageTypeEntryProto defaultInstance;
      public static QuotaByStorageTypeEntryProto getDefaultInstance() {
        return defaultInstance;
      }

      public QuotaByStorageTypeEntryProto getDefaultInstanceForType() {
        return defaultInstance;
      }

      private final com.google.protobuf.UnknownFieldSet unknownFields;
      @java.lang.Override
      public final com.google.protobuf.UnknownFieldSet
          getUnknownFields() {
        return this.unknownFields;
      }
      private QuotaByStorageTypeEntryProto(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        initFields();
        int mutable_bitField0_ = 0;
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
            com.google.protobuf.UnknownFieldSet.newBuilder();
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!parseUnknownField(input, unknownFields,
                                       extensionRegistry, tag)) {
                  done = true;
                }
                break;
              }
              case 8: {
                int rawValue = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.valueOf(rawValue);
                if (value == null) {
                  unknownFields.mergeVarintField(1, rawValue);
                } else {
                  bitField0_ |= 0x00000001;
                  storageType_ = value;
                }
                break;
              }
              case 16: {
                bitField0_ |= 0x00000002;
                quota_ = input.readUInt64();
                break;
              }
            }
          }
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(this);
        } catch (java.io.IOException e) {
          throw new com.google.protobuf.InvalidProtocolBufferException(
              e.getMessage()).setUnfinishedMessage(this);
        } finally {
          this.unknownFields = unknownFields.build();
          makeExtensionsImmutable();
        }
      }
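      // Wire-format note: a protobuf tag is (field_number << 3) | wire_type,
      // so case 8 above is field 1 as a varint ((1 << 3) | 0, the enum) and
      // case 16 is field 2 as a varint ((2 << 3) | 0, the uint64 quota).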
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder.class);
      }

      public static com.google.protobuf.Parser<QuotaByStorageTypeEntryProto> PARSER =
          new com.google.protobuf.AbstractParser<QuotaByStorageTypeEntryProto>() {
        public QuotaByStorageTypeEntryProto parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          return new QuotaByStorageTypeEntryProto(input, extensionRegistry);
        }
      };

      @java.lang.Override
      public com.google.protobuf.Parser<QuotaByStorageTypeEntryProto> getParserForType() {
        return PARSER;
      }

      private int bitField0_;
      // required .hadoop.hdfs.StorageTypeProto storageType = 1;
      public static final int STORAGETYPE_FIELD_NUMBER = 1;
      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto storageType_;
      /**
       * <code>required .hadoop.hdfs.StorageTypeProto storageType = 1;</code>
       */
      public boolean hasStorageType() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hadoop.hdfs.StorageTypeProto storageType = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
        return storageType_;
      }

      // required uint64 quota = 2;
      public static final int QUOTA_FIELD_NUMBER = 2;
      private long quota_;
      /**
       * <code>required uint64 quota = 2;</code>
       */
      public boolean hasQuota() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required uint64 quota = 2;</code>
       */
      public long getQuota() {
        return quota_;
      }

      private void initFields() {
        storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
        quota_ = 0L;
      }
      private byte memoizedIsInitialized = -1;
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized != -1) return isInitialized == 1;

        if (!hasStorageType()) {
          memoizedIsInitialized = 0;
          return false;
        }
        if (!hasQuota()) {
          memoizedIsInitialized = 0;
          return false;
        }
        memoizedIsInitialized = 1;
        return true;
      }
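      // memoizedIsInitialized encodes a tri-state: -1 not yet computed,
      // 0 known-uninitialized, 1 known-initialized, so the required-field
      // checks above run at most once per instance.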

      public void writeTo(com.google.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        getSerializedSize();
        if (((bitField0_ & 0x00000001) == 0x00000001)) {
          output.writeEnum(1, storageType_.getNumber());
        }
        if (((bitField0_ & 0x00000002) == 0x00000002)) {
          output.writeUInt64(2, quota_);
        }
        getUnknownFields().writeTo(output);
      }

      private int memoizedSerializedSize = -1;
      public int getSerializedSize() {
        int size = memoizedSerializedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) == 0x00000001)) {
          size += com.google.protobuf.CodedOutputStream
            .computeEnumSize(1, storageType_.getNumber());
        }
        if (((bitField0_ & 0x00000002) == 0x00000002)) {
          size += com.google.protobuf.CodedOutputStream
            .computeUInt64Size(2, quota_);
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSerializedSize = size;
        return size;
      }

      private static final long serialVersionUID = 0L;
      @java.lang.Override
      protected java.lang.Object writeReplace()
          throws java.io.ObjectStreamException {
        return super.writeReplace();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom(
          com.google.protobuf.ByteString data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom(
          com.google.protobuf.ByteString data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom(byte[] data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom(
          byte[] data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return PARSER.parseFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseFrom(input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return PARSER.parseDelimitedFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseDelimitedFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseDelimitedFrom(input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom(
          com.google.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return PARSER.parseFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseFrom(input, extensionRegistry);
      }
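      // All of the static parseFrom/parseDelimitedFrom overloads above are
      // thin wrappers that delegate to the shared PARSER instance.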

      public static Builder newBuilder() { return Builder.create(); }
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto prototype) {
        return newBuilder().mergeFrom(prototype);
      }
      public Builder toBuilder() { return newBuilder(this); }

      @java.lang.Override
      protected Builder newBuilderForType(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto}
       */
      public static final class Builder extends
          com.google.protobuf.GeneratedMessage.Builder<Builder>
         implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder {
        public static final com.google.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_descriptor;
        }

        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.newBuilder()
        private Builder() {
          maybeForceBuilderInitialization();
        }

        private Builder(
            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
          super(parent);
          maybeForceBuilderInitialization();
        }
        private void maybeForceBuilderInitialization() {
          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          }
        }
        private static Builder create() {
          return new Builder();
        }

        public Builder clear() {
          super.clear();
          storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
          bitField0_ = (bitField0_ & ~0x00000001);
          quota_ = 0L;
          bitField0_ = (bitField0_ & ~0x00000002);
          return this;
        }

        public Builder clone() {
          return create().mergeFrom(buildPartial());
        }

        public com.google.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_descriptor;
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.getDefaultInstance();
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto(this);
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
            to_bitField0_ |= 0x00000001;
          }
          result.storageType_ = storageType_;
          if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
            to_bitField0_ |= 0x00000002;
          }
          result.quota_ = quota_;
          result.bitField0_ = to_bitField0_;
          onBuilt();
          return result;
        }
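        // buildPartial() translates the builder's presence bits
        // (from_bitField0_) into the message's own bitField0_ one field at a
        // time, then copies the raw values; build() additionally enforces
        // the required-field check via isInitialized().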

        public Builder mergeFrom(com.google.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.getDefaultInstance()) return this;
          if (other.hasStorageType()) {
            setStorageType(other.getStorageType());
          }
          if (other.hasQuota()) {
            setQuota(other.getQuota());
          }
          this.mergeUnknownFields(other.getUnknownFields());
          return this;
        }

        public final boolean isInitialized() {
          if (!hasStorageType()) {
            
            return false;
          }
          if (!hasQuota()) {
            
            return false;
          }
          return true;
        }

        public Builder mergeFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parsedMessage = null;
          try {
            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto) e.getUnfinishedMessage();
            throw e;
          } finally {
            if (parsedMessage != null) {
              mergeFrom(parsedMessage);
            }
          }
          return this;
        }
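        // On a parse failure the partially-read message is still merged in
        // the finally block above, preserving protobuf's partial-merge
        // semantics before the exception is rethrown.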
        private int bitField0_;

        // required .hadoop.hdfs.StorageTypeProto storageType = 1;
        private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
        /**
         * <code>required .hadoop.hdfs.StorageTypeProto storageType = 1;</code>
         */
        public boolean hasStorageType() {
          return ((bitField0_ & 0x00000001) == 0x00000001);
        }
        /**
         * <code>required .hadoop.hdfs.StorageTypeProto storageType = 1;</code>
         */
        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
          return storageType_;
        }
        /**
         * <code>required .hadoop.hdfs.StorageTypeProto storageType = 1;</code>
         */
        public Builder setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
          if (value == null) {
            throw new NullPointerException();
          }
          bitField0_ |= 0x00000001;
          storageType_ = value;
          onChanged();
          return this;
        }
        /**
         * <code>required .hadoop.hdfs.StorageTypeProto storageType = 1;</code>
         */
        public Builder clearStorageType() {
          bitField0_ = (bitField0_ & ~0x00000001);
          storageType_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK;
          onChanged();
          return this;
        }

        // required uint64 quota = 2;
        private long quota_ ;
        /**
         * <code>required uint64 quota = 2;</code>
         */
        public boolean hasQuota() {
          return ((bitField0_ & 0x00000002) == 0x00000002);
        }
        /**
         * <code>required uint64 quota = 2;</code>
         */
        public long getQuota() {
          return quota_;
        }
        /**
         * <code>required uint64 quota = 2;</code>
         */
        public Builder setQuota(long value) {
          bitField0_ |= 0x00000002;
          quota_ = value;
          onChanged();
          return this;
        }
        /**
         * <code>required uint64 quota = 2;</code>
         */
        public Builder clearQuota() {
          bitField0_ = (bitField0_ & ~0x00000002);
          quota_ = 0L;
          onChanged();
          return this;
        }

        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto)
      }

      static {
        defaultInstance = new QuotaByStorageTypeEntryProto(true);
        defaultInstance.initFields();
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto)
    }
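
    /*
     * Usage sketch (illustrative): a QuotaByStorageTypeEntryProto round
     * trip through the byte[] overloads shown above. Both fields are
     * required, so build() would throw if either setter were omitted.
     *
     *   INodeSection.QuotaByStorageTypeEntryProto entry =
     *       INodeSection.QuotaByStorageTypeEntryProto.newBuilder()
     *           .setStorageType(HdfsProtos.StorageTypeProto.DISK)
     *           .setQuota(1024L)
     *           .build();
     *   byte[] bytes = entry.toByteArray();   // toByteArray() comes from
     *                                         // the MessageLite base API
     *   INodeSection.QuotaByStorageTypeEntryProto parsed =
     *       INodeSection.QuotaByStorageTypeEntryProto.parseFrom(bytes);
     */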

    public interface QuotaByStorageTypeFeatureProtoOrBuilder
        extends com.google.protobuf.MessageOrBuilder {

      // repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
       */
      java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto> 
          getQuotasList();
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto getQuotas(int index);
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
       */
      int getQuotasCount();
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
       */
      java.util.List<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder> 
          getQuotasOrBuilderList();
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder getQuotasOrBuilder(
          int index);
    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto}
     */
    public static final class QuotaByStorageTypeFeatureProto extends
        com.google.protobuf.GeneratedMessage
        implements QuotaByStorageTypeFeatureProtoOrBuilder {
      // Use QuotaByStorageTypeFeatureProto.newBuilder() to construct.
      private QuotaByStorageTypeFeatureProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
        super(builder);
        this.unknownFields = builder.getUnknownFields();
      }
      private QuotaByStorageTypeFeatureProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

      private static final QuotaByStorageTypeFeatureProto defaultInstance;
      public static QuotaByStorageTypeFeatureProto getDefaultInstance() {
        return defaultInstance;
      }

      public QuotaByStorageTypeFeatureProto getDefaultInstanceForType() {
        return defaultInstance;
      }

      private final com.google.protobuf.UnknownFieldSet unknownFields;
      @java.lang.Override
      public final com.google.protobuf.UnknownFieldSet
          getUnknownFields() {
        return this.unknownFields;
      }
      private QuotaByStorageTypeFeatureProto(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        initFields();
        int mutable_bitField0_ = 0;
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
            com.google.protobuf.UnknownFieldSet.newBuilder();
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!parseUnknownField(input, unknownFields,
                                       extensionRegistry, tag)) {
                  done = true;
                }
                break;
              }
              case 10: {
                if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
                  quotas_ = new java.util.ArrayList<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto>();
                  mutable_bitField0_ |= 0x00000001;
                }
                quotas_.add(input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.PARSER, extensionRegistry));
                break;
              }
            }
          }
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(this);
        } catch (java.io.IOException e) {
          throw new com.google.protobuf.InvalidProtocolBufferException(
              e.getMessage()).setUnfinishedMessage(this);
        } finally {
          if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
            quotas_ = java.util.Collections.unmodifiableList(quotas_);
          }
          this.unknownFields = unknownFields.build();
          makeExtensionsImmutable();
        }
      }
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder.class);
      }

      public static com.google.protobuf.Parser<QuotaByStorageTypeFeatureProto> PARSER =
          new com.google.protobuf.AbstractParser<QuotaByStorageTypeFeatureProto>() {
        public QuotaByStorageTypeFeatureProto parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          return new QuotaByStorageTypeFeatureProto(input, extensionRegistry);
        }
      };

      @java.lang.Override
      public com.google.protobuf.Parser<QuotaByStorageTypeFeatureProto> getParserForType() {
        return PARSER;
      }

      // repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;
      public static final int QUOTAS_FIELD_NUMBER = 1;
      private java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto> quotas_;
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto> getQuotasList() {
        return quotas_;
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder> 
          getQuotasOrBuilderList() {
        return quotas_;
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
       */
      public int getQuotasCount() {
        return quotas_.size();
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
       */
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto getQuotas(int index) {
        return quotas_.get(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
       */
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder getQuotasOrBuilder(
          int index) {
        return quotas_.get(index);
      }

      private void initFields() {
        quotas_ = java.util.Collections.emptyList();
      }
      private byte memoizedIsInitialized = -1;
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized != -1) return isInitialized == 1;

        for (int i = 0; i < getQuotasCount(); i++) {
          if (!getQuotas(i).isInitialized()) {
            memoizedIsInitialized = 0;
            return false;
          }
        }
        memoizedIsInitialized = 1;
        return true;
      }
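      // The repeated quotas field has no presence bit; initialization of the
      // message as a whole reduces to checking every entry, since each entry
      // carries required storageType/quota fields of its own.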

      public void writeTo(com.google.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        getSerializedSize();
        for (int i = 0; i < quotas_.size(); i++) {
          output.writeMessage(1, quotas_.get(i));
        }
        getUnknownFields().writeTo(output);
      }

      private int memoizedSerializedSize = -1;
      public int getSerializedSize() {
        int size = memoizedSerializedSize;
        if (size != -1) return size;

        size = 0;
        for (int i = 0; i < quotas_.size(); i++) {
          size += com.google.protobuf.CodedOutputStream
            .computeMessageSize(1, quotas_.get(i));
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSerializedSize = size;
        return size;
      }

      private static final long serialVersionUID = 0L;
      @java.lang.Override
      protected java.lang.Object writeReplace()
          throws java.io.ObjectStreamException {
        return super.writeReplace();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom(
          com.google.protobuf.ByteString data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom(
          com.google.protobuf.ByteString data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom(byte[] data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom(
          byte[] data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return PARSER.parseFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseFrom(input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return PARSER.parseDelimitedFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseDelimitedFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseDelimitedFrom(input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom(
          com.google.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return PARSER.parseFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseFrom(input, extensionRegistry);
      }

      public static Builder newBuilder() { return Builder.create(); }
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto prototype) {
        return newBuilder().mergeFrom(prototype);
      }
      public Builder toBuilder() { return newBuilder(this); }

      @java.lang.Override
      protected Builder newBuilderForType(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto}
       */
      public static final class Builder extends
          com.google.protobuf.GeneratedMessage.Builder<Builder>
         implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProtoOrBuilder {
        public static final com.google.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_descriptor;
        }

        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.newBuilder()
        private Builder() {
          maybeForceBuilderInitialization();
        }

        private Builder(
            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
          super(parent);
          maybeForceBuilderInitialization();
        }
        private void maybeForceBuilderInitialization() {
          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
            getQuotasFieldBuilder();
          }
        }
        private static Builder create() {
          return new Builder();
        }

        public Builder clear() {
          super.clear();
          if (quotasBuilder_ == null) {
            quotas_ = java.util.Collections.emptyList();
            bitField0_ = (bitField0_ & ~0x00000001);
          } else {
            quotasBuilder_.clear();
          }
          return this;
        }

        public Builder clone() {
          return create().mergeFrom(buildPartial());
        }

        public com.google.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_descriptor;
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.getDefaultInstance();
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto(this);
          int from_bitField0_ = bitField0_;
          if (quotasBuilder_ == null) {
            if (((bitField0_ & 0x00000001) == 0x00000001)) {
              quotas_ = java.util.Collections.unmodifiableList(quotas_);
              bitField0_ = (bitField0_ & ~0x00000001);
            }
            result.quotas_ = quotas_;
          } else {
            result.quotas_ = quotasBuilder_.build();
          }
          onBuilt();
          return result;
        }
7969
7970        public Builder mergeFrom(com.google.protobuf.Message other) {
7971          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto) {
7972            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto)other);
7973          } else {
7974            super.mergeFrom(other);
7975            return this;
7976          }
7977        }
7978
7979        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto other) {
7980          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.getDefaultInstance()) return this;
7981          if (quotasBuilder_ == null) {
7982            if (!other.quotas_.isEmpty()) {
7983              if (quotas_.isEmpty()) {
7984                quotas_ = other.quotas_;
7985                bitField0_ = (bitField0_ & ~0x00000001);
7986              } else {
7987                ensureQuotasIsMutable();
7988                quotas_.addAll(other.quotas_);
7989              }
7990              onChanged();
7991            }
7992          } else {
7993            if (!other.quotas_.isEmpty()) {
7994              if (quotasBuilder_.isEmpty()) {
7995                quotasBuilder_.dispose();
7996                quotasBuilder_ = null;
7997                quotas_ = other.quotas_;
7998                bitField0_ = (bitField0_ & ~0x00000001);
7999                quotasBuilder_ = 
8000                  com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
8001                     getQuotasFieldBuilder() : null;
8002              } else {
8003                quotasBuilder_.addAllMessages(other.quotas_);
8004              }
8005            }
8006          }
8007          this.mergeUnknownFields(other.getUnknownFields());
8008          return this;
8009        }
8010
8011        public final boolean isInitialized() {
8012          for (int i = 0; i < getQuotasCount(); i++) {
8013            if (!getQuotas(i).isInitialized()) {
8014              
8015              return false;
8016            }
8017          }
8018          return true;
8019        }
8020
8021        public Builder mergeFrom(
8022            com.google.protobuf.CodedInputStream input,
8023            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8024            throws java.io.IOException {
8025          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parsedMessage = null;
8026          try {
8027            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
8028          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
8029            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto) e.getUnfinishedMessage();
8030            throw e;
8031          } finally {
8032            if (parsedMessage != null) {
8033              mergeFrom(parsedMessage);
8034            }
8035          }
8036          return this;
8037        }
8038        private int bitField0_;
8039
8040        // repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;
8041        private java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto> quotas_ =
8042          java.util.Collections.emptyList();
8043        private void ensureQuotasIsMutable() {
8044          if (!((bitField0_ & 0x00000001) == 0x00000001)) {
8045            quotas_ = new java.util.ArrayList<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto>(quotas_);
8046            bitField0_ |= 0x00000001;
8047           }
8048        }

        private com.google.protobuf.RepeatedFieldBuilder<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder> quotasBuilder_;

        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto> getQuotasList() {
          if (quotasBuilder_ == null) {
            return java.util.Collections.unmodifiableList(quotas_);
          } else {
            return quotasBuilder_.getMessageList();
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public int getQuotasCount() {
          if (quotasBuilder_ == null) {
            return quotas_.size();
          } else {
            return quotasBuilder_.getCount();
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto getQuotas(int index) {
          if (quotasBuilder_ == null) {
            return quotas_.get(index);
          } else {
            return quotasBuilder_.getMessage(index);
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public Builder setQuotas(
            int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto value) {
          if (quotasBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            ensureQuotasIsMutable();
            quotas_.set(index, value);
            onChanged();
          } else {
            quotasBuilder_.setMessage(index, value);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public Builder setQuotas(
            int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder builderForValue) {
          if (quotasBuilder_ == null) {
            ensureQuotasIsMutable();
            quotas_.set(index, builderForValue.build());
            onChanged();
          } else {
            quotasBuilder_.setMessage(index, builderForValue.build());
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public Builder addQuotas(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto value) {
          if (quotasBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            ensureQuotasIsMutable();
            quotas_.add(value);
            onChanged();
          } else {
            quotasBuilder_.addMessage(value);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public Builder addQuotas(
            int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto value) {
          if (quotasBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            ensureQuotasIsMutable();
            quotas_.add(index, value);
            onChanged();
          } else {
            quotasBuilder_.addMessage(index, value);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public Builder addQuotas(
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder builderForValue) {
          if (quotasBuilder_ == null) {
            ensureQuotasIsMutable();
            quotas_.add(builderForValue.build());
            onChanged();
          } else {
            quotasBuilder_.addMessage(builderForValue.build());
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public Builder addQuotas(
            int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder builderForValue) {
          if (quotasBuilder_ == null) {
            ensureQuotasIsMutable();
            quotas_.add(index, builderForValue.build());
            onChanged();
          } else {
            quotasBuilder_.addMessage(index, builderForValue.build());
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public Builder addAllQuotas(
            java.lang.Iterable<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto> values) {
          if (quotasBuilder_ == null) {
            ensureQuotasIsMutable();
            super.addAll(values, quotas_);
            onChanged();
          } else {
            quotasBuilder_.addAllMessages(values);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public Builder clearQuotas() {
          if (quotasBuilder_ == null) {
            quotas_ = java.util.Collections.emptyList();
            bitField0_ = (bitField0_ & ~0x00000001);
            onChanged();
          } else {
            quotasBuilder_.clear();
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public Builder removeQuotas(int index) {
          if (quotasBuilder_ == null) {
            ensureQuotasIsMutable();
            quotas_.remove(index);
            onChanged();
          } else {
            quotasBuilder_.remove(index);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder getQuotasBuilder(
            int index) {
          return getQuotasFieldBuilder().getBuilder(index);
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder getQuotasOrBuilder(
            int index) {
          if (quotasBuilder_ == null) {
            return quotas_.get(index);
          } else {
            return quotasBuilder_.getMessageOrBuilder(index);
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public java.util.List<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder>
             getQuotasOrBuilderList() {
          if (quotasBuilder_ != null) {
            return quotasBuilder_.getMessageOrBuilderList();
          } else {
            return java.util.Collections.unmodifiableList(quotas_);
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder addQuotasBuilder() {
          return getQuotasFieldBuilder().addBuilder(
              org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.getDefaultInstance());
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder addQuotasBuilder(
            int index) {
          return getQuotasFieldBuilder().addBuilder(
              index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.getDefaultInstance());
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder>
             getQuotasBuilderList() {
          return getQuotasFieldBuilder().getBuilderList();
        }
        private com.google.protobuf.RepeatedFieldBuilder<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder>
            getQuotasFieldBuilder() {
          if (quotasBuilder_ == null) {
            quotasBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder>(
                    quotas_,
                    ((bitField0_ & 0x00000001) == 0x00000001),
                    getParentForChildren(),
                    isClean());
            quotas_ = null;
          }
          return quotasBuilder_;
        }

        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto)
      }

      static {
        defaultInstance = new QuotaByStorageTypeFeatureProto(true);
        defaultInstance.initFields();
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto)
    }
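
    // Example (illustrative sketch): assembling a quota-by-storage-type
    // feature from scratch with the generated Builder. 'entries' is an
    // assumed java.util.List<QuotaByStorageTypeEntryProto> built elsewhere.
    //
    //   INodeSection.QuotaByStorageTypeFeatureProto proto =
    //       INodeSection.QuotaByStorageTypeFeatureProto.newBuilder()
    //           .addAllQuotas(entries)
    //           .build();  // build() throws if any entry is uninitialized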

    public interface INodeDirectoryOrBuilder
        extends com.google.protobuf.MessageOrBuilder {

      // optional uint64 modificationTime = 1;
      /**
       * <code>optional uint64 modificationTime = 1;</code>
       */
      boolean hasModificationTime();
      /**
       * <code>optional uint64 modificationTime = 1;</code>
       */
      long getModificationTime();

      // optional uint64 nsQuota = 2;
      /**
       * <code>optional uint64 nsQuota = 2;</code>
       *
       * <pre>
       * namespace quota
       * </pre>
       */
      boolean hasNsQuota();
      /**
       * <code>optional uint64 nsQuota = 2;</code>
       *
       * <pre>
       * namespace quota
       * </pre>
       */
      long getNsQuota();

      // optional uint64 dsQuota = 3;
      /**
       * <code>optional uint64 dsQuota = 3;</code>
       *
       * <pre>
       * diskspace quota
       * </pre>
       */
      boolean hasDsQuota();
      /**
       * <code>optional uint64 dsQuota = 3;</code>
       *
       * <pre>
       * diskspace quota
       * </pre>
       */
      long getDsQuota();

      // optional fixed64 permission = 4;
      /**
       * <code>optional fixed64 permission = 4;</code>
       */
      boolean hasPermission();
      /**
       * <code>optional fixed64 permission = 4;</code>
       */
      long getPermission();

      // optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
       */
      boolean hasAcl();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getAcl();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder getAclOrBuilder();

      // optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
       */
      boolean hasXAttrs();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getXAttrs();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder getXAttrsOrBuilder();

      // optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
       */
      boolean hasTypeQuotas();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto getTypeQuotas();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProtoOrBuilder getTypeQuotasOrBuilder();
    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INodeDirectory}
     */
    public static final class INodeDirectory extends
        com.google.protobuf.GeneratedMessage
        implements INodeDirectoryOrBuilder {
      // Use INodeDirectory.newBuilder() to construct.
      private INodeDirectory(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
        super(builder);
        this.unknownFields = builder.getUnknownFields();
      }
      private INodeDirectory(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

      private static final INodeDirectory defaultInstance;
      public static INodeDirectory getDefaultInstance() {
        return defaultInstance;
      }

      public INodeDirectory getDefaultInstanceForType() {
        return defaultInstance;
      }

      private final com.google.protobuf.UnknownFieldSet unknownFields;
      @java.lang.Override
      public final com.google.protobuf.UnknownFieldSet
          getUnknownFields() {
        return this.unknownFields;
      }
      private INodeDirectory(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        initFields();
        int mutable_bitField0_ = 0;
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
            com.google.protobuf.UnknownFieldSet.newBuilder();
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!parseUnknownField(input, unknownFields,
                                       extensionRegistry, tag)) {
                  done = true;
                }
                break;
              }
              case 8: {
                bitField0_ |= 0x00000001;
                modificationTime_ = input.readUInt64();
                break;
              }
              case 16: {
                bitField0_ |= 0x00000002;
                nsQuota_ = input.readUInt64();
                break;
              }
              case 24: {
                bitField0_ |= 0x00000004;
                dsQuota_ = input.readUInt64();
                break;
              }
              case 33: {
                bitField0_ |= 0x00000008;
                permission_ = input.readFixed64();
                break;
              }
              case 42: {
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder subBuilder = null;
                if (((bitField0_ & 0x00000010) == 0x00000010)) {
                  subBuilder = acl_.toBuilder();
                }
                acl_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.PARSER, extensionRegistry);
                if (subBuilder != null) {
                  subBuilder.mergeFrom(acl_);
                  acl_ = subBuilder.buildPartial();
                }
                bitField0_ |= 0x00000010;
                break;
              }
              case 50: {
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder subBuilder = null;
                if (((bitField0_ & 0x00000020) == 0x00000020)) {
                  subBuilder = xAttrs_.toBuilder();
                }
                xAttrs_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.PARSER, extensionRegistry);
                if (subBuilder != null) {
                  subBuilder.mergeFrom(xAttrs_);
                  xAttrs_ = subBuilder.buildPartial();
                }
                bitField0_ |= 0x00000020;
                break;
              }
              case 58: {
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder subBuilder = null;
                if (((bitField0_ & 0x00000040) == 0x00000040)) {
                  subBuilder = typeQuotas_.toBuilder();
                }
                typeQuotas_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.PARSER, extensionRegistry);
                if (subBuilder != null) {
                  subBuilder.mergeFrom(typeQuotas_);
                  typeQuotas_ = subBuilder.buildPartial();
                }
                bitField0_ |= 0x00000040;
                break;
              }
            }
          }
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(this);
        } catch (java.io.IOException e) {
          throw new com.google.protobuf.InvalidProtocolBufferException(
              e.getMessage()).setUnfinishedMessage(this);
        } finally {
          this.unknownFields = unknownFields.build();
          makeExtensionsImmutable();
        }
      }
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder.class);
      }

      public static com.google.protobuf.Parser<INodeDirectory> PARSER =
          new com.google.protobuf.AbstractParser<INodeDirectory>() {
        public INodeDirectory parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          return new INodeDirectory(input, extensionRegistry);
        }
      };

      @java.lang.Override
      public com.google.protobuf.Parser<INodeDirectory> getParserForType() {
        return PARSER;
      }

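      // Parsing sketch (illustrative): PARSER backs the static parseFrom
      // overloads further below; all of them decode one INodeDirectory
      // record.
      //
      //   INodeDirectory dir = INodeDirectory.parseFrom(bytes);
      //   // 'bytes' is an assumed byte[] holding a serialized record
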
      private int bitField0_;
      // optional uint64 modificationTime = 1;
      public static final int MODIFICATIONTIME_FIELD_NUMBER = 1;
      private long modificationTime_;
      /**
       * <code>optional uint64 modificationTime = 1;</code>
       */
      public boolean hasModificationTime() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>optional uint64 modificationTime = 1;</code>
       */
      public long getModificationTime() {
        return modificationTime_;
      }

      // optional uint64 nsQuota = 2;
      public static final int NSQUOTA_FIELD_NUMBER = 2;
      private long nsQuota_;
      /**
       * <code>optional uint64 nsQuota = 2;</code>
       *
       * <pre>
       * namespace quota
       * </pre>
       */
      public boolean hasNsQuota() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>optional uint64 nsQuota = 2;</code>
       *
       * <pre>
       * namespace quota
       * </pre>
       */
      public long getNsQuota() {
        return nsQuota_;
      }

      // optional uint64 dsQuota = 3;
      public static final int DSQUOTA_FIELD_NUMBER = 3;
      private long dsQuota_;
      /**
       * <code>optional uint64 dsQuota = 3;</code>
       *
       * <pre>
       * diskspace quota
       * </pre>
       */
      public boolean hasDsQuota() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      /**
       * <code>optional uint64 dsQuota = 3;</code>
       *
       * <pre>
       * diskspace quota
       * </pre>
       */
      public long getDsQuota() {
        return dsQuota_;
      }

      // optional fixed64 permission = 4;
      public static final int PERMISSION_FIELD_NUMBER = 4;
      private long permission_;
      /**
       * <code>optional fixed64 permission = 4;</code>
       */
      public boolean hasPermission() {
        return ((bitField0_ & 0x00000008) == 0x00000008);
      }
      /**
       * <code>optional fixed64 permission = 4;</code>
       */
      public long getPermission() {
        return permission_;
      }

      // optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;
      public static final int ACL_FIELD_NUMBER = 5;
      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto acl_;
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
       */
      public boolean hasAcl() {
        return ((bitField0_ & 0x00000010) == 0x00000010);
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
       */
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getAcl() {
        return acl_;
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
       */
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder getAclOrBuilder() {
        return acl_;
      }

      // optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;
      public static final int XATTRS_FIELD_NUMBER = 6;
      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto xAttrs_;
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
       */
      public boolean hasXAttrs() {
        return ((bitField0_ & 0x00000020) == 0x00000020);
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
       */
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getXAttrs() {
        return xAttrs_;
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
       */
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder getXAttrsOrBuilder() {
        return xAttrs_;
      }

      // optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;
      public static final int TYPEQUOTAS_FIELD_NUMBER = 7;
      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas_;
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
       */
      public boolean hasTypeQuotas() {
        return ((bitField0_ & 0x00000040) == 0x00000040);
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
       */
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto getTypeQuotas() {
        return typeQuotas_;
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
       */
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProtoOrBuilder getTypeQuotasOrBuilder() {
        return typeQuotas_;
      }

      private void initFields() {
        modificationTime_ = 0L;
        nsQuota_ = 0L;
        dsQuota_ = 0L;
        permission_ = 0L;
        acl_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance();
        xAttrs_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance();
        typeQuotas_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.getDefaultInstance();
      }
      private byte memoizedIsInitialized = -1;
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized != -1) return isInitialized == 1;

        if (hasXAttrs()) {
          if (!getXAttrs().isInitialized()) {
            memoizedIsInitialized = 0;
            return false;
          }
        }
        if (hasTypeQuotas()) {
          if (!getTypeQuotas().isInitialized()) {
            memoizedIsInitialized = 0;
            return false;
          }
        }
        memoizedIsInitialized = 1;
        return true;
      }

      public void writeTo(com.google.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        getSerializedSize();
        if (((bitField0_ & 0x00000001) == 0x00000001)) {
          output.writeUInt64(1, modificationTime_);
        }
        if (((bitField0_ & 0x00000002) == 0x00000002)) {
          output.writeUInt64(2, nsQuota_);
        }
        if (((bitField0_ & 0x00000004) == 0x00000004)) {
          output.writeUInt64(3, dsQuota_);
        }
        if (((bitField0_ & 0x00000008) == 0x00000008)) {
          output.writeFixed64(4, permission_);
        }
        if (((bitField0_ & 0x00000010) == 0x00000010)) {
          output.writeMessage(5, acl_);
        }
        if (((bitField0_ & 0x00000020) == 0x00000020)) {
          output.writeMessage(6, xAttrs_);
        }
        if (((bitField0_ & 0x00000040) == 0x00000040)) {
          output.writeMessage(7, typeQuotas_);
        }
        getUnknownFields().writeTo(output);
      }
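
      // Round-trip sketch (illustrative): writeTo() emits exactly
      // getSerializedSize() bytes, so serialization and parsing are
      // inverses over the wire format.
      //
      //   byte[] bytes = dir.toByteArray();  // helper inherited from the
      //                                      // protobuf runtime
      //   INodeDirectory copy = INodeDirectory.parseFrom(bytes);
      //   assert copy.equals(dir);  // 'dir' is an assumed instance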

      private int memoizedSerializedSize = -1;
      public int getSerializedSize() {
        int size = memoizedSerializedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) == 0x00000001)) {
          size += com.google.protobuf.CodedOutputStream
            .computeUInt64Size(1, modificationTime_);
        }
        if (((bitField0_ & 0x00000002) == 0x00000002)) {
          size += com.google.protobuf.CodedOutputStream
            .computeUInt64Size(2, nsQuota_);
        }
        if (((bitField0_ & 0x00000004) == 0x00000004)) {
          size += com.google.protobuf.CodedOutputStream
            .computeUInt64Size(3, dsQuota_);
        }
        if (((bitField0_ & 0x00000008) == 0x00000008)) {
          size += com.google.protobuf.CodedOutputStream
            .computeFixed64Size(4, permission_);
        }
        if (((bitField0_ & 0x00000010) == 0x00000010)) {
          size += com.google.protobuf.CodedOutputStream
            .computeMessageSize(5, acl_);
        }
        if (((bitField0_ & 0x00000020) == 0x00000020)) {
          size += com.google.protobuf.CodedOutputStream
            .computeMessageSize(6, xAttrs_);
        }
        if (((bitField0_ & 0x00000040) == 0x00000040)) {
          size += com.google.protobuf.CodedOutputStream
            .computeMessageSize(7, typeQuotas_);
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSerializedSize = size;
        return size;
      }

      private static final long serialVersionUID = 0L;
      @java.lang.Override
      protected java.lang.Object writeReplace()
          throws java.io.ObjectStreamException {
        return super.writeReplace();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom(
          com.google.protobuf.ByteString data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom(
          com.google.protobuf.ByteString data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom(byte[] data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom(
          byte[] data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return PARSER.parseFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseFrom(input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return PARSER.parseDelimitedFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseDelimitedFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseDelimitedFrom(input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom(
          com.google.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return PARSER.parseFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseFrom(input, extensionRegistry);
      }

      public static Builder newBuilder() { return Builder.create(); }
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory prototype) {
        return newBuilder().mergeFrom(prototype);
      }
      public Builder toBuilder() { return newBuilder(this); }

      @java.lang.Override
      protected Builder newBuilderForType(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INodeDirectory}
       */
      public static final class Builder extends
          com.google.protobuf.GeneratedMessage.Builder<Builder>
         implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder {
        public static final com.google.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_descriptor;
        }

        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.newBuilder()
        private Builder() {
          maybeForceBuilderInitialization();
        }

        private Builder(
            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
          super(parent);
          maybeForceBuilderInitialization();
        }
        private void maybeForceBuilderInitialization() {
          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
            getAclFieldBuilder();
            getXAttrsFieldBuilder();
            getTypeQuotasFieldBuilder();
          }
        }
        private static Builder create() {
          return new Builder();
        }

        public Builder clear() {
          super.clear();
          modificationTime_ = 0L;
          bitField0_ = (bitField0_ & ~0x00000001);
          nsQuota_ = 0L;
          bitField0_ = (bitField0_ & ~0x00000002);
          dsQuota_ = 0L;
          bitField0_ = (bitField0_ & ~0x00000004);
          permission_ = 0L;
          bitField0_ = (bitField0_ & ~0x00000008);
          if (aclBuilder_ == null) {
            acl_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance();
          } else {
            aclBuilder_.clear();
          }
          bitField0_ = (bitField0_ & ~0x00000010);
          if (xAttrsBuilder_ == null) {
            xAttrs_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance();
          } else {
            xAttrsBuilder_.clear();
          }
          bitField0_ = (bitField0_ & ~0x00000020);
          if (typeQuotasBuilder_ == null) {
            typeQuotas_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.getDefaultInstance();
          } else {
            typeQuotasBuilder_.clear();
          }
          bitField0_ = (bitField0_ & ~0x00000040);
          return this;
        }

        public Builder clone() {
          return create().mergeFrom(buildPartial());
        }

        public com.google.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_descriptor;
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance();
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory(this);
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
            to_bitField0_ |= 0x00000001;
          }
          result.modificationTime_ = modificationTime_;
          if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
            to_bitField0_ |= 0x00000002;
          }
          result.nsQuota_ = nsQuota_;
          if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
            to_bitField0_ |= 0x00000004;
          }
          result.dsQuota_ = dsQuota_;
          if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
            to_bitField0_ |= 0x00000008;
          }
          result.permission_ = permission_;
          if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
            to_bitField0_ |= 0x00000010;
          }
          if (aclBuilder_ == null) {
            result.acl_ = acl_;
          } else {
            result.acl_ = aclBuilder_.build();
          }
          if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
            to_bitField0_ |= 0x00000020;
          }
          if (xAttrsBuilder_ == null) {
            result.xAttrs_ = xAttrs_;
          } else {
            result.xAttrs_ = xAttrsBuilder_.build();
          }
          if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
            to_bitField0_ |= 0x00000040;
          }
          if (typeQuotasBuilder_ == null) {
            result.typeQuotas_ = typeQuotas_;
          } else {
            result.typeQuotas_ = typeQuotasBuilder_.build();
          }
          result.bitField0_ = to_bitField0_;
          onBuilt();
          return result;
        }

        public Builder mergeFrom(com.google.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance()) return this;
          if (other.hasModificationTime()) {
            setModificationTime(other.getModificationTime());
          }
          if (other.hasNsQuota()) {
            setNsQuota(other.getNsQuota());
          }
          if (other.hasDsQuota()) {
            setDsQuota(other.getDsQuota());
          }
          if (other.hasPermission()) {
            setPermission(other.getPermission());
          }
          if (other.hasAcl()) {
            mergeAcl(other.getAcl());
          }
          if (other.hasXAttrs()) {
            mergeXAttrs(other.getXAttrs());
          }
          if (other.hasTypeQuotas()) {
            mergeTypeQuotas(other.getTypeQuotas());
          }
          this.mergeUnknownFields(other.getUnknownFields());
          return this;
        }

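        // Merge sketch (illustrative): mergeFrom(other) above copies each
        // field that is set on 'other'; scalars overwrite, message fields
        // (acl, xAttrs, typeQuotas) merge recursively.
        //
        //   INodeDirectory merged = INodeDirectory.newBuilder(base)
        //       .mergeFrom(overlay)  // 'base' and 'overlay' are assumed,
        //       .build();            // pre-built INodeDirectory messages
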
        public final boolean isInitialized() {
          if (hasXAttrs()) {
            if (!getXAttrs().isInitialized()) {
              return false;
            }
          }
          if (hasTypeQuotas()) {
            if (!getTypeQuotas().isInitialized()) {
              return false;
            }
          }
          return true;
        }

        public Builder mergeFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parsedMessage = null;
          try {
            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory) e.getUnfinishedMessage();
            throw e;
          } finally {
            if (parsedMessage != null) {
              mergeFrom(parsedMessage);
            }
          }
          return this;
        }
        private int bitField0_;

        // optional uint64 modificationTime = 1;
        private long modificationTime_;
        /**
         * <code>optional uint64 modificationTime = 1;</code>
         */
        public boolean hasModificationTime() {
          return ((bitField0_ & 0x00000001) == 0x00000001);
        }
        /**
         * <code>optional uint64 modificationTime = 1;</code>
         */
        public long getModificationTime() {
          return modificationTime_;
        }
        /**
         * <code>optional uint64 modificationTime = 1;</code>
         */
        public Builder setModificationTime(long value) {
          bitField0_ |= 0x00000001;
          modificationTime_ = value;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 modificationTime = 1;</code>
         */
        public Builder clearModificationTime() {
          bitField0_ = (bitField0_ & ~0x00000001);
          modificationTime_ = 0L;
          onChanged();
          return this;
        }

        // optional uint64 nsQuota = 2;
        private long nsQuota_;
        /**
         * <code>optional uint64 nsQuota = 2;</code>
         *
         * <pre>
         * namespace quota
         * </pre>
         */
        public boolean hasNsQuota() {
          return ((bitField0_ & 0x00000002) == 0x00000002);
        }
        /**
         * <code>optional uint64 nsQuota = 2;</code>
         *
         * <pre>
         * namespace quota
         * </pre>
         */
        public long getNsQuota() {
          return nsQuota_;
        }
        /**
         * <code>optional uint64 nsQuota = 2;</code>
         *
         * <pre>
         * namespace quota
         * </pre>
         */
        public Builder setNsQuota(long value) {
          bitField0_ |= 0x00000002;
          nsQuota_ = value;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 nsQuota = 2;</code>
         *
         * <pre>
         * namespace quota
         * </pre>
         */
        public Builder clearNsQuota() {
          bitField0_ = (bitField0_ & ~0x00000002);
          nsQuota_ = 0L;
          onChanged();
          return this;
        }

        // optional uint64 dsQuota = 3;
        private long dsQuota_;
        /**
         * <code>optional uint64 dsQuota = 3;</code>
         *
         * <pre>
         * diskspace quota
         * </pre>
         */
        public boolean hasDsQuota() {
          return ((bitField0_ & 0x00000004) == 0x00000004);
        }
        /**
         * <code>optional uint64 dsQuota = 3;</code>
         *
         * <pre>
         * diskspace quota
         * </pre>
         */
        public long getDsQuota() {
          return dsQuota_;
        }
        /**
         * <code>optional uint64 dsQuota = 3;</code>
         *
         * <pre>
         * diskspace quota
         * </pre>
         */
        public Builder setDsQuota(long value) {
          bitField0_ |= 0x00000004;
          dsQuota_ = value;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 dsQuota = 3;</code>
         *
         * <pre>
         * diskspace quota
         * </pre>
         */
        public Builder clearDsQuota() {
          bitField0_ = (bitField0_ & ~0x00000004);
          dsQuota_ = 0L;
          onChanged();
          return this;
        }

        // optional fixed64 permission = 4;
        private long permission_;
        /**
         * <code>optional fixed64 permission = 4;</code>
         */
        public boolean hasPermission() {
          return ((bitField0_ & 0x00000008) == 0x00000008);
        }
        /**
         * <code>optional fixed64 permission = 4;</code>
         */
        public long getPermission() {
          return permission_;
        }
        /**
         * <code>optional fixed64 permission = 4;</code>
         */
        public Builder setPermission(long value) {
          bitField0_ |= 0x00000008;
          permission_ = value;
          onChanged();
          return this;
        }
        /**
         * <code>optional fixed64 permission = 4;</code>
         */
        public Builder clearPermission() {
          bitField0_ = (bitField0_ & ~0x00000008);
          permission_ = 0L;
          onChanged();
          return this;
        }

        // optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;
        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto acl_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance();
        private com.google.protobuf.SingleFieldBuilder<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder> aclBuilder_;
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
         */
        public boolean hasAcl() {
          return ((bitField0_ & 0x00000010) == 0x00000010);
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getAcl() {
          if (aclBuilder_ == null) {
            return acl_;
          } else {
            return aclBuilder_.getMessage();
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
         */
        public Builder setAcl(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto value) {
          if (aclBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            acl_ = value;
            onChanged();
          } else {
            aclBuilder_.setMessage(value);
          }
          bitField0_ |= 0x00000010;
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
         */
        public Builder setAcl(
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder builderForValue) {
          if (aclBuilder_ == null) {
            acl_ = builderForValue.build();
            onChanged();
          } else {
            aclBuilder_.setMessage(builderForValue.build());
          }
          bitField0_ |= 0x00000010;
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
         */
        public Builder mergeAcl(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto value) {
          if (aclBuilder_ == null) {
            if (((bitField0_ & 0x00000010) == 0x00000010) &&
                acl_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance()) {
              acl_ =
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.newBuilder(acl_).mergeFrom(value).buildPartial();
            } else {
              acl_ = value;
9293            }
9294            onChanged();
9295          } else {
9296            aclBuilder_.mergeFrom(value);
9297          }
9298          bitField0_ |= 0x00000010;
9299          return this;
9300        }
9301        /**
9302         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
9303         */
9304        public Builder clearAcl() {
9305          if (aclBuilder_ == null) {
9306            acl_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance();
9307            onChanged();
9308          } else {
9309            aclBuilder_.clear();
9310          }
9311          bitField0_ = (bitField0_ & ~0x00000010);
9312          return this;
9313        }
9314        /**
9315         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
9316         */
9317        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder getAclBuilder() {
9318          bitField0_ |= 0x00000010;
9319          onChanged();
9320          return getAclFieldBuilder().getBuilder();
9321        }
9322        /**
9323         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
9324         */
9325        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder getAclOrBuilder() {
9326          if (aclBuilder_ != null) {
9327            return aclBuilder_.getMessageOrBuilder();
9328          } else {
9329            return acl_;
9330          }
9331        }
9332        /**
9333         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
9334         */
9335        private com.google.protobuf.SingleFieldBuilder<
9336            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder> 
9337            getAclFieldBuilder() {
9338          if (aclBuilder_ == null) {
9339            aclBuilder_ = new com.google.protobuf.SingleFieldBuilder<
9340                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder>(
9341                    acl_,
9342                    getParentForChildren(),
9343                    isClean());
9344            acl_ = null;
9345          }
9346          return aclBuilder_;
9347        }
9348
9349        // optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;
9350        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto xAttrs_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance();
9351        private com.google.protobuf.SingleFieldBuilder<
9352            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder> xAttrsBuilder_;
9353        /**
9354         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
9355         */
9356        public boolean hasXAttrs() {
9357          return ((bitField0_ & 0x00000020) == 0x00000020);
9358        }
9359        /**
9360         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
9361         */
9362        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getXAttrs() {
9363          if (xAttrsBuilder_ == null) {
9364            return xAttrs_;
9365          } else {
9366            return xAttrsBuilder_.getMessage();
9367          }
9368        }
9369        /**
9370         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
9371         */
9372        public Builder setXAttrs(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto value) {
9373          if (xAttrsBuilder_ == null) {
9374            if (value == null) {
9375              throw new NullPointerException();
9376            }
9377            xAttrs_ = value;
9378            onChanged();
9379          } else {
9380            xAttrsBuilder_.setMessage(value);
9381          }
9382          bitField0_ |= 0x00000020;
9383          return this;
9384        }
9385        /**
9386         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
9387         */
9388        public Builder setXAttrs(
9389            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder builderForValue) {
9390          if (xAttrsBuilder_ == null) {
9391            xAttrs_ = builderForValue.build();
9392            onChanged();
9393          } else {
9394            xAttrsBuilder_.setMessage(builderForValue.build());
9395          }
9396          bitField0_ |= 0x00000020;
9397          return this;
9398        }
9399        /**
9400         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
9401         */
9402        public Builder mergeXAttrs(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto value) {
9403          if (xAttrsBuilder_ == null) {
9404            if (((bitField0_ & 0x00000020) == 0x00000020) &&
9405                xAttrs_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance()) {
9406              xAttrs_ =
9407                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.newBuilder(xAttrs_).mergeFrom(value).buildPartial();
9408            } else {
9409              xAttrs_ = value;
9410            }
9411            onChanged();
9412          } else {
9413            xAttrsBuilder_.mergeFrom(value);
9414          }
9415          bitField0_ |= 0x00000020;
9416          return this;
9417        }
9418        /**
9419         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
9420         */
9421        public Builder clearXAttrs() {
9422          if (xAttrsBuilder_ == null) {
9423            xAttrs_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance();
9424            onChanged();
9425          } else {
9426            xAttrsBuilder_.clear();
9427          }
9428          bitField0_ = (bitField0_ & ~0x00000020);
9429          return this;
9430        }
9431        /**
9432         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
9433         */
9434        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder getXAttrsBuilder() {
9435          bitField0_ |= 0x00000020;
9436          onChanged();
9437          return getXAttrsFieldBuilder().getBuilder();
9438        }
9439        /**
9440         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
9441         */
9442        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder getXAttrsOrBuilder() {
9443          if (xAttrsBuilder_ != null) {
9444            return xAttrsBuilder_.getMessageOrBuilder();
9445          } else {
9446            return xAttrs_;
9447          }
9448        }
9449        /**
9450         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
9451         */
9452        private com.google.protobuf.SingleFieldBuilder<
9453            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder> 
9454            getXAttrsFieldBuilder() {
9455          if (xAttrsBuilder_ == null) {
9456            xAttrsBuilder_ = new com.google.protobuf.SingleFieldBuilder<
9457                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder>(
9458                    xAttrs_,
9459                    getParentForChildren(),
9460                    isClean());
9461            xAttrs_ = null;
9462          }
9463          return xAttrsBuilder_;
9464        }
9465
9466        // optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;
9467        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.getDefaultInstance();
9468        private com.google.protobuf.SingleFieldBuilder<
9469            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProtoOrBuilder> typeQuotasBuilder_;
9470        /**
9471         * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
9472         */
9473        public boolean hasTypeQuotas() {
9474          return ((bitField0_ & 0x00000040) == 0x00000040);
9475        }
9476        /**
9477         * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
9478         */
9479        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto getTypeQuotas() {
9480          if (typeQuotasBuilder_ == null) {
9481            return typeQuotas_;
9482          } else {
9483            return typeQuotasBuilder_.getMessage();
9484          }
9485        }
9486        /**
9487         * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
9488         */
9489        public Builder setTypeQuotas(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto value) {
9490          if (typeQuotasBuilder_ == null) {
9491            if (value == null) {
9492              throw new NullPointerException();
9493            }
9494            typeQuotas_ = value;
9495            onChanged();
9496          } else {
9497            typeQuotasBuilder_.setMessage(value);
9498          }
9499          bitField0_ |= 0x00000040;
9500          return this;
9501        }
9502        /**
9503         * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
9504         */
9505        public Builder setTypeQuotas(
9506            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder builderForValue) {
9507          if (typeQuotasBuilder_ == null) {
9508            typeQuotas_ = builderForValue.build();
9509            onChanged();
9510          } else {
9511            typeQuotasBuilder_.setMessage(builderForValue.build());
9512          }
9513          bitField0_ |= 0x00000040;
9514          return this;
9515        }
9516        /**
9517         * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
9518         */
9519        public Builder mergeTypeQuotas(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto value) {
9520          if (typeQuotasBuilder_ == null) {
9521            if (((bitField0_ & 0x00000040) == 0x00000040) &&
9522                typeQuotas_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.getDefaultInstance()) {
9523              typeQuotas_ =
9524                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.newBuilder(typeQuotas_).mergeFrom(value).buildPartial();
9525            } else {
9526              typeQuotas_ = value;
9527            }
9528            onChanged();
9529          } else {
9530            typeQuotasBuilder_.mergeFrom(value);
9531          }
9532          bitField0_ |= 0x00000040;
9533          return this;
9534        }
9535        /**
9536         * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
9537         */
9538        public Builder clearTypeQuotas() {
9539          if (typeQuotasBuilder_ == null) {
9540            typeQuotas_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.getDefaultInstance();
9541            onChanged();
9542          } else {
9543            typeQuotasBuilder_.clear();
9544          }
9545          bitField0_ = (bitField0_ & ~0x00000040);
9546          return this;
9547        }
9548        /**
9549         * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
9550         */
9551        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder getTypeQuotasBuilder() {
9552          bitField0_ |= 0x00000040;
9553          onChanged();
9554          return getTypeQuotasFieldBuilder().getBuilder();
9555        }
9556        /**
9557         * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
9558         */
9559        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProtoOrBuilder getTypeQuotasOrBuilder() {
9560          if (typeQuotasBuilder_ != null) {
9561            return typeQuotasBuilder_.getMessageOrBuilder();
9562          } else {
9563            return typeQuotas_;
9564          }
9565        }
9566        /**
9567         * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
9568         */
9569        private com.google.protobuf.SingleFieldBuilder<
9570            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProtoOrBuilder> 
9571            getTypeQuotasFieldBuilder() {
9572          if (typeQuotasBuilder_ == null) {
9573            typeQuotasBuilder_ = new com.google.protobuf.SingleFieldBuilder<
9574                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProtoOrBuilder>(
9575                    typeQuotas_,
9576                    getParentForChildren(),
9577                    isClean());
9578            typeQuotas_ = null;
9579          }
9580          return typeQuotasBuilder_;
9581        }
9582
9583        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.INodeDirectory)
9584      }
9585
9586      static {
9587        defaultInstance = new INodeDirectory(true);
9588        defaultInstance.initFields();
9589      }
9590
9591      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.INodeDirectory)
9592    }
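
    /*
     * Illustrative usage sketch (hand-written, not protoc output): the Builder
     * above follows the standard protobuf 2.x pattern, so a directory record
     * with quotas could be assembled roughly like this. newBuilder() is the
     * usual generated factory (it is shown explicitly for INodeSymlink below);
     * the quota and permission setters are the ones defined above. The
     * packedPermission value is hypothetical -- the fixed64 permission field
     * is an opaque packed word, not a plain POSIX mode.
     *
     *   long packedPermission = ...; // hypothetical packed owner/group/mode word
     *   FsImageProto.INodeSection.INodeDirectory dir =
     *       FsImageProto.INodeSection.INodeDirectory.newBuilder()
     *           .setNsQuota(100000L)    // namespace quota (max objects)
     *           .setDsQuota(10L << 30)  // diskspace quota in bytes (10 GiB)
     *           .setPermission(packedPermission)
     *           .build();
     *   // dir.hasNsQuota() and dir.getNsQuota() now reflect the values set.
     */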

    public interface INodeSymlinkOrBuilder
        extends com.google.protobuf.MessageOrBuilder {

      // optional fixed64 permission = 1;
      /**
       * <code>optional fixed64 permission = 1;</code>
       */
      boolean hasPermission();
      /**
       * <code>optional fixed64 permission = 1;</code>
       */
      long getPermission();

      // optional bytes target = 2;
      /**
       * <code>optional bytes target = 2;</code>
       */
      boolean hasTarget();
      /**
       * <code>optional bytes target = 2;</code>
       */
      com.google.protobuf.ByteString getTarget();

      // optional uint64 modificationTime = 3;
      /**
       * <code>optional uint64 modificationTime = 3;</code>
       */
      boolean hasModificationTime();
      /**
       * <code>optional uint64 modificationTime = 3;</code>
       */
      long getModificationTime();

      // optional uint64 accessTime = 4;
      /**
       * <code>optional uint64 accessTime = 4;</code>
       */
      boolean hasAccessTime();
      /**
       * <code>optional uint64 accessTime = 4;</code>
       */
      long getAccessTime();
    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INodeSymlink}
     */
    public static final class INodeSymlink extends
        com.google.protobuf.GeneratedMessage
        implements INodeSymlinkOrBuilder {
      // Use INodeSymlink.newBuilder() to construct.
      private INodeSymlink(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
        super(builder);
        this.unknownFields = builder.getUnknownFields();
      }
      private INodeSymlink(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

      private static final INodeSymlink defaultInstance;
      public static INodeSymlink getDefaultInstance() {
        return defaultInstance;
      }

      public INodeSymlink getDefaultInstanceForType() {
        return defaultInstance;
      }

      private final com.google.protobuf.UnknownFieldSet unknownFields;
      @java.lang.Override
      public final com.google.protobuf.UnknownFieldSet
          getUnknownFields() {
        return this.unknownFields;
      }
      private INodeSymlink(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        initFields();
        int mutable_bitField0_ = 0;
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
            com.google.protobuf.UnknownFieldSet.newBuilder();
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!parseUnknownField(input, unknownFields,
                                       extensionRegistry, tag)) {
                  done = true;
                }
                break;
              }
              case 9: {
                bitField0_ |= 0x00000001;
                permission_ = input.readFixed64();
                break;
              }
              case 18: {
                bitField0_ |= 0x00000002;
                target_ = input.readBytes();
                break;
              }
              case 24: {
                bitField0_ |= 0x00000004;
                modificationTime_ = input.readUInt64();
                break;
              }
              case 32: {
                bitField0_ |= 0x00000008;
                accessTime_ = input.readUInt64();
                break;
              }
            }
          }
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(this);
        } catch (java.io.IOException e) {
          throw new com.google.protobuf.InvalidProtocolBufferException(
              e.getMessage()).setUnfinishedMessage(this);
        } finally {
          this.unknownFields = unknownFields.build();
          makeExtensionsImmutable();
        }
      }
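      /*
       * Note on the tag switch above: a protobuf wire tag is
       * (fieldNumber << 3) | wireType, so case 9 is field 1 as fixed64,
       * case 18 is field 2 as a length-delimited value, and cases 24 and 32
       * are fields 3 and 4 as varints. Tag 0 marks end of input, and tags
       * this version does not recognize are preserved in unknownFields via
       * parseUnknownField().
       */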
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder.class);
      }

      public static com.google.protobuf.Parser<INodeSymlink> PARSER =
          new com.google.protobuf.AbstractParser<INodeSymlink>() {
        public INodeSymlink parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          return new INodeSymlink(input, extensionRegistry);
        }
      };

      @java.lang.Override
      public com.google.protobuf.Parser<INodeSymlink> getParserForType() {
        return PARSER;
      }

      private int bitField0_;
      // optional fixed64 permission = 1;
      public static final int PERMISSION_FIELD_NUMBER = 1;
      private long permission_;
      /**
       * <code>optional fixed64 permission = 1;</code>
       */
      public boolean hasPermission() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>optional fixed64 permission = 1;</code>
       */
      public long getPermission() {
        return permission_;
      }

      // optional bytes target = 2;
      public static final int TARGET_FIELD_NUMBER = 2;
      private com.google.protobuf.ByteString target_;
      /**
       * <code>optional bytes target = 2;</code>
       */
      public boolean hasTarget() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>optional bytes target = 2;</code>
       */
      public com.google.protobuf.ByteString getTarget() {
        return target_;
      }

      // optional uint64 modificationTime = 3;
      public static final int MODIFICATIONTIME_FIELD_NUMBER = 3;
      private long modificationTime_;
      /**
       * <code>optional uint64 modificationTime = 3;</code>
       */
      public boolean hasModificationTime() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      /**
       * <code>optional uint64 modificationTime = 3;</code>
       */
      public long getModificationTime() {
        return modificationTime_;
      }

      // optional uint64 accessTime = 4;
      public static final int ACCESSTIME_FIELD_NUMBER = 4;
      private long accessTime_;
      /**
       * <code>optional uint64 accessTime = 4;</code>
       */
      public boolean hasAccessTime() {
        return ((bitField0_ & 0x00000008) == 0x00000008);
      }
      /**
       * <code>optional uint64 accessTime = 4;</code>
       */
      public long getAccessTime() {
        return accessTime_;
      }

      private void initFields() {
        permission_ = 0L;
        target_ = com.google.protobuf.ByteString.EMPTY;
        modificationTime_ = 0L;
        accessTime_ = 0L;
      }
      private byte memoizedIsInitialized = -1;
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized != -1) return isInitialized == 1;

        memoizedIsInitialized = 1;
        return true;
      }

      public void writeTo(com.google.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        getSerializedSize();
        if (((bitField0_ & 0x00000001) == 0x00000001)) {
          output.writeFixed64(1, permission_);
        }
        if (((bitField0_ & 0x00000002) == 0x00000002)) {
          output.writeBytes(2, target_);
        }
        if (((bitField0_ & 0x00000004) == 0x00000004)) {
          output.writeUInt64(3, modificationTime_);
        }
        if (((bitField0_ & 0x00000008) == 0x00000008)) {
          output.writeUInt64(4, accessTime_);
        }
        getUnknownFields().writeTo(output);
      }

      private int memoizedSerializedSize = -1;
      public int getSerializedSize() {
        int size = memoizedSerializedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) == 0x00000001)) {
          size += com.google.protobuf.CodedOutputStream
            .computeFixed64Size(1, permission_);
        }
        if (((bitField0_ & 0x00000002) == 0x00000002)) {
          size += com.google.protobuf.CodedOutputStream
            .computeBytesSize(2, target_);
        }
        if (((bitField0_ & 0x00000004) == 0x00000004)) {
          size += com.google.protobuf.CodedOutputStream
            .computeUInt64Size(3, modificationTime_);
        }
        if (((bitField0_ & 0x00000008) == 0x00000008)) {
          size += com.google.protobuf.CodedOutputStream
            .computeUInt64Size(4, accessTime_);
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSerializedSize = size;
        return size;
      }

      private static final long serialVersionUID = 0L;
      @java.lang.Override
      protected java.lang.Object writeReplace()
          throws java.io.ObjectStreamException {
        return super.writeReplace();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom(
          com.google.protobuf.ByteString data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom(
          com.google.protobuf.ByteString data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom(byte[] data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom(
          byte[] data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return PARSER.parseFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseFrom(input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return PARSER.parseDelimitedFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseDelimitedFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseDelimitedFrom(input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom(
          com.google.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return PARSER.parseFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseFrom(input, extensionRegistry);
      }

      public static Builder newBuilder() { return Builder.create(); }
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink prototype) {
        return newBuilder().mergeFrom(prototype);
      }
      public Builder toBuilder() { return newBuilder(this); }

      @java.lang.Override
      protected Builder newBuilderForType(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INodeSymlink}
       */
      public static final class Builder extends
          com.google.protobuf.GeneratedMessage.Builder<Builder>
         implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlinkOrBuilder {
        public static final com.google.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_descriptor;
        }

        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.newBuilder()
        private Builder() {
          maybeForceBuilderInitialization();
        }

        private Builder(
            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
          super(parent);
          maybeForceBuilderInitialization();
        }
        private void maybeForceBuilderInitialization() {
          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          }
        }
        private static Builder create() {
          return new Builder();
        }

        public Builder clear() {
          super.clear();
          permission_ = 0L;
          bitField0_ = (bitField0_ & ~0x00000001);
          target_ = com.google.protobuf.ByteString.EMPTY;
          bitField0_ = (bitField0_ & ~0x00000002);
          modificationTime_ = 0L;
          bitField0_ = (bitField0_ & ~0x00000004);
          accessTime_ = 0L;
          bitField0_ = (bitField0_ & ~0x00000008);
          return this;
        }

        public Builder clone() {
          return create().mergeFrom(buildPartial());
        }

        public com.google.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_descriptor;
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance();
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink(this);
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
            to_bitField0_ |= 0x00000001;
          }
          result.permission_ = permission_;
          if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
            to_bitField0_ |= 0x00000002;
          }
          result.target_ = target_;
          if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
            to_bitField0_ |= 0x00000004;
          }
          result.modificationTime_ = modificationTime_;
          if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
            to_bitField0_ |= 0x00000008;
          }
          result.accessTime_ = accessTime_;
          result.bitField0_ = to_bitField0_;
          onBuilt();
          return result;
        }

        public Builder mergeFrom(com.google.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance()) return this;
          if (other.hasPermission()) {
            setPermission(other.getPermission());
          }
          if (other.hasTarget()) {
            setTarget(other.getTarget());
          }
          if (other.hasModificationTime()) {
            setModificationTime(other.getModificationTime());
          }
          if (other.hasAccessTime()) {
            setAccessTime(other.getAccessTime());
          }
          this.mergeUnknownFields(other.getUnknownFields());
          return this;
        }

        public final boolean isInitialized() {
          return true;
        }

        public Builder mergeFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parsedMessage = null;
          try {
            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink) e.getUnfinishedMessage();
            throw e;
          } finally {
            if (parsedMessage != null) {
              mergeFrom(parsedMessage);
            }
          }
          return this;
        }
        private int bitField0_;

        // optional fixed64 permission = 1;
        private long permission_;
        /**
         * <code>optional fixed64 permission = 1;</code>
         */
        public boolean hasPermission() {
          return ((bitField0_ & 0x00000001) == 0x00000001);
        }
        /**
         * <code>optional fixed64 permission = 1;</code>
         */
        public long getPermission() {
          return permission_;
        }
        /**
         * <code>optional fixed64 permission = 1;</code>
         */
        public Builder setPermission(long value) {
          bitField0_ |= 0x00000001;
          permission_ = value;
          onChanged();
          return this;
        }
        /**
         * <code>optional fixed64 permission = 1;</code>
         */
        public Builder clearPermission() {
          bitField0_ = (bitField0_ & ~0x00000001);
          permission_ = 0L;
          onChanged();
          return this;
        }

        // optional bytes target = 2;
        private com.google.protobuf.ByteString target_ = com.google.protobuf.ByteString.EMPTY;
        /**
         * <code>optional bytes target = 2;</code>
         */
        public boolean hasTarget() {
          return ((bitField0_ & 0x00000002) == 0x00000002);
        }
        /**
         * <code>optional bytes target = 2;</code>
         */
        public com.google.protobuf.ByteString getTarget() {
          return target_;
        }
        /**
         * <code>optional bytes target = 2;</code>
         */
        public Builder setTarget(com.google.protobuf.ByteString value) {
          if (value == null) {
            throw new NullPointerException();
          }
          bitField0_ |= 0x00000002;
          target_ = value;
          onChanged();
          return this;
        }
        /**
         * <code>optional bytes target = 2;</code>
         */
        public Builder clearTarget() {
          bitField0_ = (bitField0_ & ~0x00000002);
          target_ = getDefaultInstance().getTarget();
          onChanged();
          return this;
        }

        // optional uint64 modificationTime = 3;
        private long modificationTime_;
        /**
         * <code>optional uint64 modificationTime = 3;</code>
         */
        public boolean hasModificationTime() {
          return ((bitField0_ & 0x00000004) == 0x00000004);
        }
        /**
         * <code>optional uint64 modificationTime = 3;</code>
         */
        public long getModificationTime() {
          return modificationTime_;
        }
        /**
         * <code>optional uint64 modificationTime = 3;</code>
         */
        public Builder setModificationTime(long value) {
          bitField0_ |= 0x00000004;
          modificationTime_ = value;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 modificationTime = 3;</code>
         */
        public Builder clearModificationTime() {
          bitField0_ = (bitField0_ & ~0x00000004);
          modificationTime_ = 0L;
          onChanged();
          return this;
        }

        // optional uint64 accessTime = 4;
        private long accessTime_;
        /**
         * <code>optional uint64 accessTime = 4;</code>
         */
        public boolean hasAccessTime() {
          return ((bitField0_ & 0x00000008) == 0x00000008);
        }
        /**
         * <code>optional uint64 accessTime = 4;</code>
         */
        public long getAccessTime() {
          return accessTime_;
        }
        /**
         * <code>optional uint64 accessTime = 4;</code>
         */
        public Builder setAccessTime(long value) {
          bitField0_ |= 0x00000008;
          accessTime_ = value;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 accessTime = 4;</code>
         */
        public Builder clearAccessTime() {
          bitField0_ = (bitField0_ & ~0x00000008);
          accessTime_ = 0L;
          onChanged();
          return this;
        }

        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.INodeSymlink)
      }

      static {
        defaultInstance = new INodeSymlink(true);
        defaultInstance.initFields();
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.INodeSymlink)
    }
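
    /*
     * Illustrative round-trip sketch (hand-written, not protoc output):
     * building a symlink record and re-parsing it with the parseFrom(byte[])
     * overload defined above. toByteArray() is the standard protobuf
     * serializer inherited from MessageLite; the target path is a made-up
     * example value.
     *
     *   FsImageProto.INodeSection.INodeSymlink link =
     *       FsImageProto.INodeSection.INodeSymlink.newBuilder()
     *           .setTarget(com.google.protobuf.ByteString.copyFromUtf8("/some/target"))
     *           .setModificationTime(System.currentTimeMillis())
     *           .build();
     *   byte[] wire = link.toByteArray();
     *   FsImageProto.INodeSection.INodeSymlink parsed =
     *       FsImageProto.INodeSection.INodeSymlink.parseFrom(wire);
     *   // parsed.getTarget().equals(link.getTarget()) holds after the round trip.
     */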
10235
10236    public interface INodeOrBuilder
10237        extends com.google.protobuf.MessageOrBuilder {
10238
10239      // required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1;
10240      /**
10241       * <code>required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1;</code>
10242       */
10243      boolean hasType();
10244      /**
10245       * <code>required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1;</code>
10246       */
10247      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type getType();
10248
10249      // required uint64 id = 2;
10250      /**
10251       * <code>required uint64 id = 2;</code>
10252       */
10253      boolean hasId();
10254      /**
10255       * <code>required uint64 id = 2;</code>
10256       */
10257      long getId();
10258
10259      // optional bytes name = 3;
10260      /**
10261       * <code>optional bytes name = 3;</code>
10262       */
10263      boolean hasName();
10264      /**
10265       * <code>optional bytes name = 3;</code>
10266       */
10267      com.google.protobuf.ByteString getName();
10268
10269      // optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;
10270      /**
10271       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
10272       */
10273      boolean hasFile();
10274      /**
10275       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
10276       */
10277      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getFile();
10278      /**
10279       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
10280       */
10281      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder getFileOrBuilder();
10282
10283      // optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;
10284      /**
10285       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
10286       */
10287      boolean hasDirectory();
10288      /**
10289       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
10290       */
10291      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getDirectory();
10292      /**
10293       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
10294       */
10295      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder getDirectoryOrBuilder();
10296
10297      // optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;
10298      /**
10299       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
10300       */
10301      boolean hasSymlink();
10302      /**
10303       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
10304       */
10305      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink getSymlink();
10306      /**
10307       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
10308       */
10309      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlinkOrBuilder getSymlinkOrBuilder();
10310    }
10311    /**
10312     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INode}
10313     */
10314    public static final class INode extends
10315        com.google.protobuf.GeneratedMessage
10316        implements INodeOrBuilder {
10317      // Use INode.newBuilder() to construct.
10318      private INode(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
10319        super(builder);
10320        this.unknownFields = builder.getUnknownFields();
10321      }
10322      private INode(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
10323
10324      private static final INode defaultInstance;
10325      public static INode getDefaultInstance() {
10326        return defaultInstance;
10327      }
10328
10329      public INode getDefaultInstanceForType() {
10330        return defaultInstance;
10331      }
10332
10333      private final com.google.protobuf.UnknownFieldSet unknownFields;
10334      @java.lang.Override
10335      public final com.google.protobuf.UnknownFieldSet
10336          getUnknownFields() {
10337        return this.unknownFields;
10338      }
10339      private INode(
10340          com.google.protobuf.CodedInputStream input,
10341          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10342          throws com.google.protobuf.InvalidProtocolBufferException {
10343        initFields();
10344        int mutable_bitField0_ = 0;
10345        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
10346            com.google.protobuf.UnknownFieldSet.newBuilder();
10347        try {
10348          boolean done = false;
10349          while (!done) {
10350            int tag = input.readTag();
10351            switch (tag) {
10352              case 0:
10353                done = true;
10354                break;
10355              default: {
10356                if (!parseUnknownField(input, unknownFields,
10357                                       extensionRegistry, tag)) {
10358                  done = true;
10359                }
10360                break;
10361              }
10362              case 8: {
10363                int rawValue = input.readEnum();
10364                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type value = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type.valueOf(rawValue);
10365                if (value == null) {
10366                  unknownFields.mergeVarintField(1, rawValue);
10367                } else {
10368                  bitField0_ |= 0x00000001;
10369                  type_ = value;
10370                }
10371                break;
10372              }
10373              case 16: {
10374                bitField0_ |= 0x00000002;
10375                id_ = input.readUInt64();
10376                break;
10377              }
10378              case 26: {
10379                bitField0_ |= 0x00000004;
10380                name_ = input.readBytes();
10381                break;
10382              }
10383              case 34: {
10384                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder subBuilder = null;
10385                if (((bitField0_ & 0x00000008) == 0x00000008)) {
10386                  subBuilder = file_.toBuilder();
10387                }
10388                file_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.PARSER, extensionRegistry);
10389                if (subBuilder != null) {
10390                  subBuilder.mergeFrom(file_);
10391                  file_ = subBuilder.buildPartial();
10392                }
10393                bitField0_ |= 0x00000008;
10394                break;
10395              }
10396              case 42: {
10397                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder subBuilder = null;
10398                if (((bitField0_ & 0x00000010) == 0x00000010)) {
10399                  subBuilder = directory_.toBuilder();
10400                }
10401                directory_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.PARSER, extensionRegistry);
10402                if (subBuilder != null) {
10403                  subBuilder.mergeFrom(directory_);
10404                  directory_ = subBuilder.buildPartial();
10405                }
10406                bitField0_ |= 0x00000010;
10407                break;
10408              }
10409              case 50: {
10410                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder subBuilder = null;
10411                if (((bitField0_ & 0x00000020) == 0x00000020)) {
10412                  subBuilder = symlink_.toBuilder();
10413                }
10414                symlink_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.PARSER, extensionRegistry);
10415                if (subBuilder != null) {
10416                  subBuilder.mergeFrom(symlink_);
10417                  symlink_ = subBuilder.buildPartial();
10418                }
10419                bitField0_ |= 0x00000020;
10420                break;
10421              }
10422            }
10423          }
10424        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
10425          throw e.setUnfinishedMessage(this);
10426        } catch (java.io.IOException e) {
10427          throw new com.google.protobuf.InvalidProtocolBufferException(
10428              e.getMessage()).setUnfinishedMessage(this);
10429        } finally {
10430          this.unknownFields = unknownFields.build();
10431          makeExtensionsImmutable();
10432        }
10433      }
10434      public static final com.google.protobuf.Descriptors.Descriptor
10435          getDescriptor() {
10436        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INode_descriptor;
10437      }
10438
10439      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
10440          internalGetFieldAccessorTable() {
10441        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INode_fieldAccessorTable
10442            .ensureFieldAccessorsInitialized(
10443                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder.class);
10444      }
10445
10446      public static com.google.protobuf.Parser<INode> PARSER =
10447          new com.google.protobuf.AbstractParser<INode>() {
10448        public INode parsePartialFrom(
10449            com.google.protobuf.CodedInputStream input,
10450            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10451            throws com.google.protobuf.InvalidProtocolBufferException {
10452          return new INode(input, extensionRegistry);
10453        }
10454      };
10455
10456      @java.lang.Override
10457      public com.google.protobuf.Parser<INode> getParserForType() {
10458        return PARSER;
10459      }
10460
10461      /**
10462       * Protobuf enum {@code hadoop.hdfs.fsimage.INodeSection.INode.Type}
10463       */
10464      public enum Type
10465          implements com.google.protobuf.ProtocolMessageEnum {
10466        /**
10467         * <code>FILE = 1;</code>
10468         */
10469        FILE(0, 1),
10470        /**
10471         * <code>DIRECTORY = 2;</code>
10472         */
10473        DIRECTORY(1, 2),
10474        /**
10475         * <code>SYMLINK = 3;</code>
10476         */
10477        SYMLINK(2, 3),
10478        ;
10479
10480        /**
10481         * <code>FILE = 1;</code>
10482         */
10483        public static final int FILE_VALUE = 1;
10484        /**
10485         * <code>DIRECTORY = 2;</code>
10486         */
10487        public static final int DIRECTORY_VALUE = 2;
10488        /**
10489         * <code>SYMLINK = 3;</code>
10490         */
10491        public static final int SYMLINK_VALUE = 3;
10492
10493
10494        public final int getNumber() { return value; }
10495
10496        public static Type valueOf(int value) {
10497          switch (value) {
10498            case 1: return FILE;
10499            case 2: return DIRECTORY;
10500            case 3: return SYMLINK;
10501            default: return null;
10502          }
10503        }
10504
10505        public static com.google.protobuf.Internal.EnumLiteMap<Type>
10506            internalGetValueMap() {
10507          return internalValueMap;
10508        }
10509        private static com.google.protobuf.Internal.EnumLiteMap<Type>
10510            internalValueMap =
10511              new com.google.protobuf.Internal.EnumLiteMap<Type>() {
10512                public Type findValueByNumber(int number) {
10513                  return Type.valueOf(number);
10514                }
10515              };
10516
10517        public final com.google.protobuf.Descriptors.EnumValueDescriptor
10518            getValueDescriptor() {
10519          return getDescriptor().getValues().get(index);
10520        }
10521        public final com.google.protobuf.Descriptors.EnumDescriptor
10522            getDescriptorForType() {
10523          return getDescriptor();
10524        }
10525        public static final com.google.protobuf.Descriptors.EnumDescriptor
10526            getDescriptor() {
10527          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDescriptor().getEnumTypes().get(0);
10528        }
10529
10530        private static final Type[] VALUES = values();
10531
10532        public static Type valueOf(
10533            com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
10534          if (desc.getType() != getDescriptor()) {
10535            throw new java.lang.IllegalArgumentException(
10536              "EnumValueDescriptor is not for this type.");
10537          }
10538          return VALUES[desc.getIndex()];
10539        }
10540
10541        private final int index;
10542        private final int value;
10543
10544        private Type(int index, int value) {
10545          this.index = index;
10546          this.value = value;
10547        }
10548
10549        // @@protoc_insertion_point(enum_scope:hadoop.hdfs.fsimage.INodeSection.INode.Type)
10550      }
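
      /*
       * Editor's note (an illustrative sketch, not protoc output): Type keeps
       * two numbers per constant -- the Java ordinal (index) and the proto wire
       * number (value). A hedged usage example, using only the API above:
       *
       *   INode.Type t = INode.Type.valueOf(2);        // DIRECTORY (wire number 2)
       *   int wire = t.getNumber();                    // 2, the value written on the wire
       *   INode.Type unknown = INode.Type.valueOf(99); // null: unmapped wire numbers
       */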
10551
10552      private int bitField0_;
10553      // required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1;
10554      public static final int TYPE_FIELD_NUMBER = 1;
10555      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type type_;
10556      /**
10557       * <code>required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1;</code>
10558       */
10559      public boolean hasType() {
10560        return ((bitField0_ & 0x00000001) == 0x00000001);
10561      }
10562      /**
10563       * <code>required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1;</code>
10564       */
10565      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type getType() {
10566        return type_;
10567      }
10568
10569      // required uint64 id = 2;
10570      public static final int ID_FIELD_NUMBER = 2;
10571      private long id_;
10572      /**
10573       * <code>required uint64 id = 2;</code>
10574       */
10575      public boolean hasId() {
10576        return ((bitField0_ & 0x00000002) == 0x00000002);
10577      }
10578      /**
10579       * <code>required uint64 id = 2;</code>
10580       */
10581      public long getId() {
10582        return id_;
10583      }
10584
10585      // optional bytes name = 3;
10586      public static final int NAME_FIELD_NUMBER = 3;
10587      private com.google.protobuf.ByteString name_;
10588      /**
10589       * <code>optional bytes name = 3;</code>
10590       */
10591      public boolean hasName() {
10592        return ((bitField0_ & 0x00000004) == 0x00000004);
10593      }
10594      /**
10595       * <code>optional bytes name = 3;</code>
10596       */
10597      public com.google.protobuf.ByteString getName() {
10598        return name_;
10599      }
10600
10601      // optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;
10602      public static final int FILE_FIELD_NUMBER = 4;
10603      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile file_;
10604      /**
10605       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
10606       */
10607      public boolean hasFile() {
10608        return ((bitField0_ & 0x00000008) == 0x00000008);
10609      }
10610      /**
10611       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
10612       */
10613      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getFile() {
10614        return file_;
10615      }
10616      /**
10617       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
10618       */
10619      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder getFileOrBuilder() {
10620        return file_;
10621      }
10622
10623      // optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;
10624      public static final int DIRECTORY_FIELD_NUMBER = 5;
10625      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory directory_;
10626      /**
10627       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
10628       */
10629      public boolean hasDirectory() {
10630        return ((bitField0_ & 0x00000010) == 0x00000010);
10631      }
10632      /**
10633       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
10634       */
10635      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getDirectory() {
10636        return directory_;
10637      }
10638      /**
10639       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
10640       */
10641      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder getDirectoryOrBuilder() {
10642        return directory_;
10643      }
10644
10645      // optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;
10646      public static final int SYMLINK_FIELD_NUMBER = 6;
10647      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink symlink_;
10648      /**
10649       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
10650       */
10651      public boolean hasSymlink() {
10652        return ((bitField0_ & 0x00000020) == 0x00000020);
10653      }
10654      /**
10655       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
10656       */
10657      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink getSymlink() {
10658        return symlink_;
10659      }
10660      /**
10661       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
10662       */
10663      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlinkOrBuilder getSymlinkOrBuilder() {
10664        return symlink_;
10665      }
10666
10667      private void initFields() {
10668        type_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type.FILE;
10669        id_ = 0L;
10670        name_ = com.google.protobuf.ByteString.EMPTY;
10671        file_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
10672        directory_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance();
10673        symlink_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance();
10674      }
10675      private byte memoizedIsInitialized = -1;
10676      public final boolean isInitialized() {
10677        byte isInitialized = memoizedIsInitialized;
10678        if (isInitialized != -1) return isInitialized == 1;
10679
10680        if (!hasType()) {
10681          memoizedIsInitialized = 0;
10682          return false;
10683        }
10684        if (!hasId()) {
10685          memoizedIsInitialized = 0;
10686          return false;
10687        }
10688        if (hasFile()) {
10689          if (!getFile().isInitialized()) {
10690            memoizedIsInitialized = 0;
10691            return false;
10692          }
10693        }
10694        if (hasDirectory()) {
10695          if (!getDirectory().isInitialized()) {
10696            memoizedIsInitialized = 0;
10697            return false;
10698          }
10699        }
10700        memoizedIsInitialized = 1;
10701        return true;
10702      }
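
      /*
       * Editor's note (an illustrative sketch, not protoc output): this method
       * enforces proto2 "required" semantics: type and id must be set, and an
       * optional file/directory sub-message, if present, must itself be
       * initialized. For example (16385L is a hypothetical inode id):
       *
       *   INode.Builder b = INode.newBuilder();      // no required field set yet
       *   b.isInitialized();                         // -> false
       *   b.setType(INode.Type.FILE).setId(16385L);
       *   b.isInitialized();                         // -> true; build() now succeeds
       */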
10703
10704      public void writeTo(com.google.protobuf.CodedOutputStream output)
10705                          throws java.io.IOException {
10706        getSerializedSize();
10707        if (((bitField0_ & 0x00000001) == 0x00000001)) {
10708          output.writeEnum(1, type_.getNumber());
10709        }
10710        if (((bitField0_ & 0x00000002) == 0x00000002)) {
10711          output.writeUInt64(2, id_);
10712        }
10713        if (((bitField0_ & 0x00000004) == 0x00000004)) {
10714          output.writeBytes(3, name_);
10715        }
10716        if (((bitField0_ & 0x00000008) == 0x00000008)) {
10717          output.writeMessage(4, file_);
10718        }
10719        if (((bitField0_ & 0x00000010) == 0x00000010)) {
10720          output.writeMessage(5, directory_);
10721        }
10722        if (((bitField0_ & 0x00000020) == 0x00000020)) {
10723          output.writeMessage(6, symlink_);
10724        }
10725        getUnknownFields().writeTo(output);
10726      }
10727
10728      private int memoizedSerializedSize = -1;
10729      public int getSerializedSize() {
10730        int size = memoizedSerializedSize;
10731        if (size != -1) return size;
10732
10733        size = 0;
10734        if (((bitField0_ & 0x00000001) == 0x00000001)) {
10735          size += com.google.protobuf.CodedOutputStream
10736            .computeEnumSize(1, type_.getNumber());
10737        }
10738        if (((bitField0_ & 0x00000002) == 0x00000002)) {
10739          size += com.google.protobuf.CodedOutputStream
10740            .computeUInt64Size(2, id_);
10741        }
10742        if (((bitField0_ & 0x00000004) == 0x00000004)) {
10743          size += com.google.protobuf.CodedOutputStream
10744            .computeBytesSize(3, name_);
10745        }
10746        if (((bitField0_ & 0x00000008) == 0x00000008)) {
10747          size += com.google.protobuf.CodedOutputStream
10748            .computeMessageSize(4, file_);
10749        }
10750        if (((bitField0_ & 0x00000010) == 0x00000010)) {
10751          size += com.google.protobuf.CodedOutputStream
10752            .computeMessageSize(5, directory_);
10753        }
10754        if (((bitField0_ & 0x00000020) == 0x00000020)) {
10755          size += com.google.protobuf.CodedOutputStream
10756            .computeMessageSize(6, symlink_);
10757        }
10758        size += getUnknownFields().getSerializedSize();
10759        memoizedSerializedSize = size;
10760        return size;
10761      }
10762
10763      private static final long serialVersionUID = 0L;
10764      @java.lang.Override
10765      protected java.lang.Object writeReplace()
10766          throws java.io.ObjectStreamException {
10767        return super.writeReplace();
10768      }
10769
10770      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom(
10771          com.google.protobuf.ByteString data)
10772          throws com.google.protobuf.InvalidProtocolBufferException {
10773        return PARSER.parseFrom(data);
10774      }
10775      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom(
10776          com.google.protobuf.ByteString data,
10777          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10778          throws com.google.protobuf.InvalidProtocolBufferException {
10779        return PARSER.parseFrom(data, extensionRegistry);
10780      }
10781      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom(byte[] data)
10782          throws com.google.protobuf.InvalidProtocolBufferException {
10783        return PARSER.parseFrom(data);
10784      }
10785      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom(
10786          byte[] data,
10787          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10788          throws com.google.protobuf.InvalidProtocolBufferException {
10789        return PARSER.parseFrom(data, extensionRegistry);
10790      }
10791      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom(java.io.InputStream input)
10792          throws java.io.IOException {
10793        return PARSER.parseFrom(input);
10794      }
10795      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom(
10796          java.io.InputStream input,
10797          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10798          throws java.io.IOException {
10799        return PARSER.parseFrom(input, extensionRegistry);
10800      }
10801      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseDelimitedFrom(java.io.InputStream input)
10802          throws java.io.IOException {
10803        return PARSER.parseDelimitedFrom(input);
10804      }
10805      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseDelimitedFrom(
10806          java.io.InputStream input,
10807          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10808          throws java.io.IOException {
10809        return PARSER.parseDelimitedFrom(input, extensionRegistry);
10810      }
10811      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom(
10812          com.google.protobuf.CodedInputStream input)
10813          throws java.io.IOException {
10814        return PARSER.parseFrom(input);
10815      }
10816      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom(
10817          com.google.protobuf.CodedInputStream input,
10818          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10819          throws java.io.IOException {
10820        return PARSER.parseFrom(input, extensionRegistry);
10821      }
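
      /*
       * Editor's note (an illustrative sketch, not protoc output): every
       * parseFrom/parseDelimitedFrom overload above delegates to PARSER. INode
       * records in an fsimage INODE section are written length-delimited, so a
       * reader would look roughly like this (the stream and numInodes are
       * assumptions, not part of this file):
       *
       *   java.io.InputStream in = ...;            // positioned at the section body
       *   for (long i = 0; i < numInodes; i++) {
       *     INodeSection.INode inode = INodeSection.INode.parseDelimitedFrom(in);
       *     // inspect inode.getType(), inode.getId(), inode.getName(), ...
       *   }
       */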
10822
10823      public static Builder newBuilder() { return Builder.create(); }
10824      public Builder newBuilderForType() { return newBuilder(); }
10825      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode prototype) {
10826        return newBuilder().mergeFrom(prototype);
10827      }
10828      public Builder toBuilder() { return newBuilder(this); }
10829
10830      @java.lang.Override
10831      protected Builder newBuilderForType(
10832          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
10833        Builder builder = new Builder(parent);
10834        return builder;
10835      }
10836      /**
10837       * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INode}
10838       */
10839      public static final class Builder extends
10840          com.google.protobuf.GeneratedMessage.Builder<Builder>
10841         implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeOrBuilder {
10842        public static final com.google.protobuf.Descriptors.Descriptor
10843            getDescriptor() {
10844          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INode_descriptor;
10845        }
10846
10847        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
10848            internalGetFieldAccessorTable() {
10849          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INode_fieldAccessorTable
10850              .ensureFieldAccessorsInitialized(
10851                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder.class);
10852        }
10853
10854        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.newBuilder()
10855        private Builder() {
10856          maybeForceBuilderInitialization();
10857        }
10858
10859        private Builder(
10860            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
10861          super(parent);
10862          maybeForceBuilderInitialization();
10863        }
10864        private void maybeForceBuilderInitialization() {
10865          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
10866            getFileFieldBuilder();
10867            getDirectoryFieldBuilder();
10868            getSymlinkFieldBuilder();
10869          }
10870        }
10871        private static Builder create() {
10872          return new Builder();
10873        }
10874
10875        public Builder clear() {
10876          super.clear();
10877          type_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type.FILE;
10878          bitField0_ = (bitField0_ & ~0x00000001);
10879          id_ = 0L;
10880          bitField0_ = (bitField0_ & ~0x00000002);
10881          name_ = com.google.protobuf.ByteString.EMPTY;
10882          bitField0_ = (bitField0_ & ~0x00000004);
10883          if (fileBuilder_ == null) {
10884            file_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
10885          } else {
10886            fileBuilder_.clear();
10887          }
10888          bitField0_ = (bitField0_ & ~0x00000008);
10889          if (directoryBuilder_ == null) {
10890            directory_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance();
10891          } else {
10892            directoryBuilder_.clear();
10893          }
10894          bitField0_ = (bitField0_ & ~0x00000010);
10895          if (symlinkBuilder_ == null) {
10896            symlink_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance();
10897          } else {
10898            symlinkBuilder_.clear();
10899          }
10900          bitField0_ = (bitField0_ & ~0x00000020);
10901          return this;
10902        }
10903
10904        public Builder clone() {
10905          return create().mergeFrom(buildPartial());
10906        }
10907
10908        public com.google.protobuf.Descriptors.Descriptor
10909            getDescriptorForType() {
10910          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INode_descriptor;
10911        }
10912
10913        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode getDefaultInstanceForType() {
10914          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance();
10915        }
10916
10917        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode build() {
10918          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode result = buildPartial();
10919          if (!result.isInitialized()) {
10920            throw newUninitializedMessageException(result);
10921          }
10922          return result;
10923        }
10924
10925        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode buildPartial() {
10926          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode(this);
10927          int from_bitField0_ = bitField0_;
10928          int to_bitField0_ = 0;
10929          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
10930            to_bitField0_ |= 0x00000001;
10931          }
10932          result.type_ = type_;
10933          if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
10934            to_bitField0_ |= 0x00000002;
10935          }
10936          result.id_ = id_;
10937          if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
10938            to_bitField0_ |= 0x00000004;
10939          }
10940          result.name_ = name_;
10941          if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
10942            to_bitField0_ |= 0x00000008;
10943          }
10944          if (fileBuilder_ == null) {
10945            result.file_ = file_;
10946          } else {
10947            result.file_ = fileBuilder_.build();
10948          }
10949          if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
10950            to_bitField0_ |= 0x00000010;
10951          }
10952          if (directoryBuilder_ == null) {
10953            result.directory_ = directory_;
10954          } else {
10955            result.directory_ = directoryBuilder_.build();
10956          }
10957          if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
10958            to_bitField0_ |= 0x00000020;
10959          }
10960          if (symlinkBuilder_ == null) {
10961            result.symlink_ = symlink_;
10962          } else {
10963            result.symlink_ = symlinkBuilder_.build();
10964          }
10965          result.bitField0_ = to_bitField0_;
10966          onBuilt();
10967          return result;
10968        }
10969
10970        public Builder mergeFrom(com.google.protobuf.Message other) {
10971          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode) {
10972            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode)other);
10973          } else {
10974            super.mergeFrom(other);
10975            return this;
10976          }
10977        }
10978
10979        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode other) {
10980          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance()) return this;
10981          if (other.hasType()) {
10982            setType(other.getType());
10983          }
10984          if (other.hasId()) {
10985            setId(other.getId());
10986          }
10987          if (other.hasName()) {
10988            setName(other.getName());
10989          }
10990          if (other.hasFile()) {
10991            mergeFile(other.getFile());
10992          }
10993          if (other.hasDirectory()) {
10994            mergeDirectory(other.getDirectory());
10995          }
10996          if (other.hasSymlink()) {
10997            mergeSymlink(other.getSymlink());
10998          }
10999          this.mergeUnknownFields(other.getUnknownFields());
11000          return this;
11001        }
11002
11003        public final boolean isInitialized() {
11004          if (!hasType()) {
11005            
11006            return false;
11007          }
11008          if (!hasId()) {
11009            
11010            return false;
11011          }
11012          if (hasFile()) {
11013            if (!getFile().isInitialized()) {
11014              
11015              return false;
11016            }
11017          }
11018          if (hasDirectory()) {
11019            if (!getDirectory().isInitialized()) {
11020              
11021              return false;
11022            }
11023          }
11024          return true;
11025        }
11026
11027        public Builder mergeFrom(
11028            com.google.protobuf.CodedInputStream input,
11029            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11030            throws java.io.IOException {
11031          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parsedMessage = null;
11032          try {
11033            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
11034          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
11035            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode) e.getUnfinishedMessage();
11036            throw e;
11037          } finally {
11038            if (parsedMessage != null) {
11039              mergeFrom(parsedMessage);
11040            }
11041          }
11042          return this;
11043        }
11044        private int bitField0_;
11045
11046        // required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1;
11047        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type type_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type.FILE;
11048        /**
11049         * <code>required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1;</code>
11050         */
11051        public boolean hasType() {
11052          return ((bitField0_ & 0x00000001) == 0x00000001);
11053        }
11054        /**
11055         * <code>required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1;</code>
11056         */
11057        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type getType() {
11058          return type_;
11059        }
11060        /**
11061         * <code>required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1;</code>
11062         */
11063        public Builder setType(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type value) {
11064          if (value == null) {
11065            throw new NullPointerException();
11066          }
11067          bitField0_ |= 0x00000001;
11068          type_ = value;
11069          onChanged();
11070          return this;
11071        }
11072        /**
11073         * <code>required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1;</code>
11074         */
11075        public Builder clearType() {
11076          bitField0_ = (bitField0_ & ~0x00000001);
11077          type_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type.FILE;
11078          onChanged();
11079          return this;
11080        }
11081
11082        // required uint64 id = 2;
11083        private long id_;
11084        /**
11085         * <code>required uint64 id = 2;</code>
11086         */
11087        public boolean hasId() {
11088          return ((bitField0_ & 0x00000002) == 0x00000002);
11089        }
11090        /**
11091         * <code>required uint64 id = 2;</code>
11092         */
11093        public long getId() {
11094          return id_;
11095        }
11096        /**
11097         * <code>required uint64 id = 2;</code>
11098         */
11099        public Builder setId(long value) {
11100          bitField0_ |= 0x00000002;
11101          id_ = value;
11102          onChanged();
11103          return this;
11104        }
11105        /**
11106         * <code>required uint64 id = 2;</code>
11107         */
11108        public Builder clearId() {
11109          bitField0_ = (bitField0_ & ~0x00000002);
11110          id_ = 0L;
11111          onChanged();
11112          return this;
11113        }
11114
11115        // optional bytes name = 3;
11116        private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY;
11117        /**
11118         * <code>optional bytes name = 3;</code>
11119         */
11120        public boolean hasName() {
11121          return ((bitField0_ & 0x00000004) == 0x00000004);
11122        }
11123        /**
11124         * <code>optional bytes name = 3;</code>
11125         */
11126        public com.google.protobuf.ByteString getName() {
11127          return name_;
11128        }
11129        /**
11130         * <code>optional bytes name = 3;</code>
11131         */
11132        public Builder setName(com.google.protobuf.ByteString value) {
11133          if (value == null) {
11134            throw new NullPointerException();
11135          }
11136          bitField0_ |= 0x00000004;
11137          name_ = value;
11138          onChanged();
11139          return this;
11140        }
11141        /**
11142         * <code>optional bytes name = 3;</code>
11143         */
11144        public Builder clearName() {
11145          bitField0_ = (bitField0_ & ~0x00000004);
11146          name_ = getDefaultInstance().getName();
11147          onChanged();
11148          return this;
11149        }
11150
11151        // optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;
11152        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile file_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
11153        private com.google.protobuf.SingleFieldBuilder<
11154            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder> fileBuilder_;
11155        /**
11156         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
11157         */
11158        public boolean hasFile() {
11159          return ((bitField0_ & 0x00000008) == 0x00000008);
11160        }
11161        /**
11162         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
11163         */
11164        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getFile() {
11165          if (fileBuilder_ == null) {
11166            return file_;
11167          } else {
11168            return fileBuilder_.getMessage();
11169          }
11170        }
11171        /**
11172         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
11173         */
11174        public Builder setFile(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile value) {
11175          if (fileBuilder_ == null) {
11176            if (value == null) {
11177              throw new NullPointerException();
11178            }
11179            file_ = value;
11180            onChanged();
11181          } else {
11182            fileBuilder_.setMessage(value);
11183          }
11184          bitField0_ |= 0x00000008;
11185          return this;
11186        }
11187        /**
11188         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
11189         */
11190        public Builder setFile(
11191            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder builderForValue) {
11192          if (fileBuilder_ == null) {
11193            file_ = builderForValue.build();
11194            onChanged();
11195          } else {
11196            fileBuilder_.setMessage(builderForValue.build());
11197          }
11198          bitField0_ |= 0x00000008;
11199          return this;
11200        }
11201        /**
11202         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
11203         */
11204        public Builder mergeFile(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile value) {
11205          if (fileBuilder_ == null) {
11206            if (((bitField0_ & 0x00000008) == 0x00000008) &&
11207                file_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance()) {
11208              file_ =
11209                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.newBuilder(file_).mergeFrom(value).buildPartial();
11210            } else {
11211              file_ = value;
11212            }
11213            onChanged();
11214          } else {
11215            fileBuilder_.mergeFrom(value);
11216          }
11217          bitField0_ |= 0x00000008;
11218          return this;
11219        }
11220        /**
11221         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
11222         */
11223        public Builder clearFile() {
11224          if (fileBuilder_ == null) {
11225            file_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
11226            onChanged();
11227          } else {
11228            fileBuilder_.clear();
11229          }
11230          bitField0_ = (bitField0_ & ~0x00000008);
11231          return this;
11232        }
11233        /**
11234         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
11235         */
11236        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder getFileBuilder() {
11237          bitField0_ |= 0x00000008;
11238          onChanged();
11239          return getFileFieldBuilder().getBuilder();
11240        }
11241        /**
11242         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
11243         */
11244        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder getFileOrBuilder() {
11245          if (fileBuilder_ != null) {
11246            return fileBuilder_.getMessageOrBuilder();
11247          } else {
11248            return file_;
11249          }
11250        }
11251        /**
11252         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
11253         */
11254        private com.google.protobuf.SingleFieldBuilder<
11255            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder> 
11256            getFileFieldBuilder() {
11257          if (fileBuilder_ == null) {
11258            fileBuilder_ = new com.google.protobuf.SingleFieldBuilder<
11259                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder>(
11260                    file_,
11261                    getParentForChildren(),
11262                    isClean());
11263            file_ = null;
11264          }
11265          return fileBuilder_;
11266        }
11267
11268        // optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;
11269        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory directory_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance();
11270        private com.google.protobuf.SingleFieldBuilder<
11271            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder> directoryBuilder_;
11272        /**
11273         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
11274         */
11275        public boolean hasDirectory() {
11276          return ((bitField0_ & 0x00000010) == 0x00000010);
11277        }
11278        /**
11279         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
11280         */
11281        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getDirectory() {
11282          if (directoryBuilder_ == null) {
11283            return directory_;
11284          } else {
11285            return directoryBuilder_.getMessage();
11286          }
11287        }
11288        /**
11289         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
11290         */
11291        public Builder setDirectory(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory value) {
11292          if (directoryBuilder_ == null) {
11293            if (value == null) {
11294              throw new NullPointerException();
11295            }
11296            directory_ = value;
11297            onChanged();
11298          } else {
11299            directoryBuilder_.setMessage(value);
11300          }
11301          bitField0_ |= 0x00000010;
11302          return this;
11303        }
11304        /**
11305         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
11306         */
11307        public Builder setDirectory(
11308            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder builderForValue) {
11309          if (directoryBuilder_ == null) {
11310            directory_ = builderForValue.build();
11311            onChanged();
11312          } else {
11313            directoryBuilder_.setMessage(builderForValue.build());
11314          }
11315          bitField0_ |= 0x00000010;
11316          return this;
11317        }
11318        /**
11319         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
11320         */
11321        public Builder mergeDirectory(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory value) {
11322          if (directoryBuilder_ == null) {
11323            if (((bitField0_ & 0x00000010) == 0x00000010) &&
11324                directory_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance()) {
11325              directory_ =
11326                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.newBuilder(directory_).mergeFrom(value).buildPartial();
11327            } else {
11328              directory_ = value;
11329            }
11330            onChanged();
11331          } else {
11332            directoryBuilder_.mergeFrom(value);
11333          }
11334          bitField0_ |= 0x00000010;
11335          return this;
11336        }
11337        /**
11338         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
11339         */
11340        public Builder clearDirectory() {
11341          if (directoryBuilder_ == null) {
11342            directory_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance();
11343            onChanged();
11344          } else {
11345            directoryBuilder_.clear();
11346          }
11347          bitField0_ = (bitField0_ & ~0x00000010);
11348          return this;
11349        }
11350        /**
11351         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
11352         */
11353        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder getDirectoryBuilder() {
11354          bitField0_ |= 0x00000010;
11355          onChanged();
11356          return getDirectoryFieldBuilder().getBuilder();
11357        }
11358        /**
11359         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
11360         */
11361        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder getDirectoryOrBuilder() {
11362          if (directoryBuilder_ != null) {
11363            return directoryBuilder_.getMessageOrBuilder();
11364          } else {
11365            return directory_;
11366          }
11367        }
11368        /**
11369         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
11370         */
11371        private com.google.protobuf.SingleFieldBuilder<
11372            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder> 
11373            getDirectoryFieldBuilder() {
11374          if (directoryBuilder_ == null) {
11375            directoryBuilder_ = new com.google.protobuf.SingleFieldBuilder<
11376                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder>(
11377                    directory_,
11378                    getParentForChildren(),
11379                    isClean());
11380            directory_ = null;
11381          }
11382          return directoryBuilder_;
11383        }
11384
11385        // optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;
11386        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink symlink_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance();
11387        private com.google.protobuf.SingleFieldBuilder<
11388            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlinkOrBuilder> symlinkBuilder_;
11389        /**
11390         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
11391         */
11392        public boolean hasSymlink() {
11393          return ((bitField0_ & 0x00000020) == 0x00000020);
11394        }
11395        /**
11396         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
11397         */
11398        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink getSymlink() {
11399          if (symlinkBuilder_ == null) {
11400            return symlink_;
11401          } else {
11402            return symlinkBuilder_.getMessage();
11403          }
11404        }
11405        /**
11406         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
11407         */
11408        public Builder setSymlink(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink value) {
11409          if (symlinkBuilder_ == null) {
11410            if (value == null) {
11411              throw new NullPointerException();
11412            }
11413            symlink_ = value;
11414            onChanged();
11415          } else {
11416            symlinkBuilder_.setMessage(value);
11417          }
11418          bitField0_ |= 0x00000020;
11419          return this;
11420        }
11421        /**
11422         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
11423         */
11424        public Builder setSymlink(
11425            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder builderForValue) {
11426          if (symlinkBuilder_ == null) {
11427            symlink_ = builderForValue.build();
11428            onChanged();
11429          } else {
11430            symlinkBuilder_.setMessage(builderForValue.build());
11431          }
11432          bitField0_ |= 0x00000020;
11433          return this;
11434        }
11435        /**
11436         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
11437         */
11438        public Builder mergeSymlink(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink value) {
11439          if (symlinkBuilder_ == null) {
11440            if (((bitField0_ & 0x00000020) == 0x00000020) &&
11441                symlink_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance()) {
11442              symlink_ =
11443                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.newBuilder(symlink_).mergeFrom(value).buildPartial();
11444            } else {
11445              symlink_ = value;
11446            }
11447            onChanged();
11448          } else {
11449            symlinkBuilder_.mergeFrom(value);
11450          }
11451          bitField0_ |= 0x00000020;
11452          return this;
11453        }
11454        /**
11455         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
11456         */
11457        public Builder clearSymlink() {
11458          if (symlinkBuilder_ == null) {
11459            symlink_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance();
11460            onChanged();
11461          } else {
11462            symlinkBuilder_.clear();
11463          }
11464          bitField0_ = (bitField0_ & ~0x00000020);
11465          return this;
11466        }
11467        /**
11468         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
11469         */
11470        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder getSymlinkBuilder() {
11471          bitField0_ |= 0x00000020;
11472          onChanged();
11473          return getSymlinkFieldBuilder().getBuilder();
11474        }
11475        /**
11476         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
11477         */
11478        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlinkOrBuilder getSymlinkOrBuilder() {
11479          if (symlinkBuilder_ != null) {
11480            return symlinkBuilder_.getMessageOrBuilder();
11481          } else {
11482            return symlink_;
11483          }
11484        }
11485        /**
11486         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
11487         */
11488        private com.google.protobuf.SingleFieldBuilder<
11489            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlinkOrBuilder> 
11490            getSymlinkFieldBuilder() {
11491          if (symlinkBuilder_ == null) {
11492            symlinkBuilder_ = new com.google.protobuf.SingleFieldBuilder<
11493                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlinkOrBuilder>(
11494                    symlink_,
11495                    getParentForChildren(),
11496                    isClean());
11497            symlink_ = null;
11498          }
11499          return symlinkBuilder_;
11500        }
11501
11502        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.INode)
11503      }
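
      /*
       * Editor's note (an illustrative sketch, not protoc output): a typical
       * round trip through the Builder above; the id, name and file payload
       * are hypothetical values:
       *
       *   INodeSection.INode inode = INodeSection.INode.newBuilder()
       *       .setType(INodeSection.INode.Type.FILE)
       *       .setId(16386L)
       *       .setName(com.google.protobuf.ByteString.copyFromUtf8("data.txt"))
       *       .setFile(INodeSection.INodeFile.getDefaultInstance())
       *       .build();  // throws if the required type/id are missing
       */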
11504
11505      static {
11506        defaultInstance = new INode(true);
11507        defaultInstance.initFields();
11508      }
11509
11510      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.INode)
11511    }
11512
11513    private int bitField0_;
11514    // optional uint64 lastInodeId = 1;
11515    public static final int LASTINODEID_FIELD_NUMBER = 1;
11516    private long lastInodeId_;
11517    /**
11518     * <code>optional uint64 lastInodeId = 1;</code>
11519     */
11520    public boolean hasLastInodeId() {
11521      return ((bitField0_ & 0x00000001) == 0x00000001);
11522    }
11523    /**
11524     * <code>optional uint64 lastInodeId = 1;</code>
11525     */
11526    public long getLastInodeId() {
11527      return lastInodeId_;
11528    }
11529
11530    // optional uint64 numInodes = 2;
11531    public static final int NUMINODES_FIELD_NUMBER = 2;
11532    private long numInodes_;
11533    /**
11534     * <code>optional uint64 numInodes = 2;</code>
11535     *
11536     * <pre>
11537     * The number of repeated INode records that follow.
11538     * </pre>
11539     */
11540    public boolean hasNumInodes() {
11541      return ((bitField0_ & 0x00000002) == 0x00000002);
11542    }
11543    /**
11544     * <code>optional uint64 numInodes = 2;</code>
11545     *
11546     * <pre>
11547     * The number of repeated INode records that follow.
11548     * </pre>
11549     */
11550    public long getNumInodes() {
11551      return numInodes_;
11552    }
11553
11554    private void initFields() {
11555      lastInodeId_ = 0L;
11556      numInodes_ = 0L;
11557    }
11558    private byte memoizedIsInitialized = -1;
11559    public final boolean isInitialized() {
11560      byte isInitialized = memoizedIsInitialized;
11561      if (isInitialized != -1) return isInitialized == 1;
11562
11563      memoizedIsInitialized = 1;
11564      return true;
11565    }
11566
11567    public void writeTo(com.google.protobuf.CodedOutputStream output)
11568                        throws java.io.IOException {
11569      getSerializedSize();
11570      if (((bitField0_ & 0x00000001) == 0x00000001)) {
11571        output.writeUInt64(1, lastInodeId_);
11572      }
11573      if (((bitField0_ & 0x00000002) == 0x00000002)) {
11574        output.writeUInt64(2, numInodes_);
11575      }
11576      getUnknownFields().writeTo(output);
11577    }
11578
11579    private int memoizedSerializedSize = -1;
11580    public int getSerializedSize() {
11581      int size = memoizedSerializedSize;
11582      if (size != -1) return size;
11583
11584      size = 0;
11585      if (((bitField0_ & 0x00000001) == 0x00000001)) {
11586        size += com.google.protobuf.CodedOutputStream
11587          .computeUInt64Size(1, lastInodeId_);
11588      }
11589      if (((bitField0_ & 0x00000002) == 0x00000002)) {
11590        size += com.google.protobuf.CodedOutputStream
11591          .computeUInt64Size(2, numInodes_);
11592      }
11593      size += getUnknownFields().getSerializedSize();
11594      memoizedSerializedSize = size;
11595      return size;
11596    }
11597
11598    private static final long serialVersionUID = 0L;
11599    @java.lang.Override
11600    protected java.lang.Object writeReplace()
11601        throws java.io.ObjectStreamException {
11602      return super.writeReplace();
11603    }
11604
11605    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom(
11606        com.google.protobuf.ByteString data)
11607        throws com.google.protobuf.InvalidProtocolBufferException {
11608      return PARSER.parseFrom(data);
11609    }
11610    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom(
11611        com.google.protobuf.ByteString data,
11612        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11613        throws com.google.protobuf.InvalidProtocolBufferException {
11614      return PARSER.parseFrom(data, extensionRegistry);
11615    }
11616    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom(byte[] data)
11617        throws com.google.protobuf.InvalidProtocolBufferException {
11618      return PARSER.parseFrom(data);
11619    }
11620    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom(
11621        byte[] data,
11622        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11623        throws com.google.protobuf.InvalidProtocolBufferException {
11624      return PARSER.parseFrom(data, extensionRegistry);
11625    }
11626    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom(java.io.InputStream input)
11627        throws java.io.IOException {
11628      return PARSER.parseFrom(input);
11629    }
11630    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom(
11631        java.io.InputStream input,
11632        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11633        throws java.io.IOException {
11634      return PARSER.parseFrom(input, extensionRegistry);
11635    }
11636    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseDelimitedFrom(java.io.InputStream input)
11637        throws java.io.IOException {
11638      return PARSER.parseDelimitedFrom(input);
11639    }
11640    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseDelimitedFrom(
11641        java.io.InputStream input,
11642        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11643        throws java.io.IOException {
11644      return PARSER.parseDelimitedFrom(input, extensionRegistry);
11645    }
11646    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom(
11647        com.google.protobuf.CodedInputStream input)
11648        throws java.io.IOException {
11649      return PARSER.parseFrom(input);
11650    }
11651    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom(
11652        com.google.protobuf.CodedInputStream input,
11653        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11654        throws java.io.IOException {
11655      return PARSER.parseFrom(input, extensionRegistry);
11656    }
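
    /*
     * Editor's note (an illustrative sketch, not protoc output): in an fsimage
     * file the INodeSection header itself is written length-delimited, followed
     * by numInodes length-delimited INode records, so a loader could start with
     * (stream handling is an assumption):
     *
     *   INodeSection s = INodeSection.parseDelimitedFrom(in);
     *   long lastId = s.getLastInodeId();
     *   long n = s.getNumInodes();  // INode records that follow this header
     */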
11657
11658    public static Builder newBuilder() { return Builder.create(); }
11659    public Builder newBuilderForType() { return newBuilder(); }
11660    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection prototype) {
11661      return newBuilder().mergeFrom(prototype);
11662    }
11663    public Builder toBuilder() { return newBuilder(this); }
11664
11665    @java.lang.Override
11666    protected Builder newBuilderForType(
11667        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
11668      Builder builder = new Builder(parent);
11669      return builder;
11670    }
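    /*
     * Editor's note (an illustrative sketch, not protoc output): the Javadoc
     * below documents the permission encoding for this section. Decoding a
     * 64-bit permission word by that layout, with hypothetical variable names:
     *
     *   long perm = ...;                                  // serialized permission
     *   int userStrId  = (int) (perm >>> 40) & 0xFFFFFF;  // bits [0:24), user name string id
     *   int groupStrId = (int) (perm >>> 16) & 0xFFFFFF;  // bits [24:48), group name string id
     *   int mode       = (int) (perm & 0xFFFF);           // bits [48:64), permission bits
     */
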
11671    /**
11672     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection}
11673     *
11674     * <pre>
11675     *
11676     * Permission is serialized as a 64-bit long: [0:24):[24:48):[48:64) (in Big Endian).
11677     * The first and the second parts are the string ids of the user and
11678     * group names, and the last 16 bits are the permission bits.
11679     *
11680     * Name: INODE
11681     * </pre>
11682     */
11683    public static final class Builder extends
11684        com.google.protobuf.GeneratedMessage.Builder<Builder>
11685       implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSectionOrBuilder {
11686      public static final com.google.protobuf.Descriptors.Descriptor
11687          getDescriptor() {
11688        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor;
11689      }
11690
11691      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
11692          internalGetFieldAccessorTable() {
11693        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_fieldAccessorTable
11694            .ensureFieldAccessorsInitialized(
11695                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.Builder.class);
11696      }
11697
11698      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.newBuilder()
11699      private Builder() {
11700        maybeForceBuilderInitialization();
11701      }
11702
11703      private Builder(
11704          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
11705        super(parent);
11706        maybeForceBuilderInitialization();
11707      }
11708      private void maybeForceBuilderInitialization() {
11709        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
11710        }
11711      }
11712      private static Builder create() {
11713        return new Builder();
11714      }
11715
11716      public Builder clear() {
11717        super.clear();
11718        lastInodeId_ = 0L;
11719        bitField0_ = (bitField0_ & ~0x00000001);
11720        numInodes_ = 0L;
11721        bitField0_ = (bitField0_ & ~0x00000002);
11722        return this;
11723      }
11724
11725      public Builder clone() {
11726        return create().mergeFrom(buildPartial());
11727      }
11728
11729      public com.google.protobuf.Descriptors.Descriptor
11730          getDescriptorForType() {
11731        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor;
11732      }
11733
11734      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection getDefaultInstanceForType() {
11735        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.getDefaultInstance();
11736      }
11737
11738      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection build() {
11739        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection result = buildPartial();
11740        if (!result.isInitialized()) {
11741          throw newUninitializedMessageException(result);
11742        }
11743        return result;
11744      }
11745
11746      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection buildPartial() {
11747        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection(this);
11748        int from_bitField0_ = bitField0_;
11749        int to_bitField0_ = 0;
11750        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
11751          to_bitField0_ |= 0x00000001;
11752        }
11753        result.lastInodeId_ = lastInodeId_;
11754        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
11755          to_bitField0_ |= 0x00000002;
11756        }
11757        result.numInodes_ = numInodes_;
11758        result.bitField0_ = to_bitField0_;
11759        onBuilt();
11760        return result;
11761      }
11762
11763      public Builder mergeFrom(com.google.protobuf.Message other) {
11764        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection) {
11765          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection)other);
11766        } else {
11767          super.mergeFrom(other);
11768          return this;
11769        }
11770      }
11771
11772      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection other) {
11773        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.getDefaultInstance()) return this;
11774        if (other.hasLastInodeId()) {
11775          setLastInodeId(other.getLastInodeId());
11776        }
11777        if (other.hasNumInodes()) {
11778          setNumInodes(other.getNumInodes());
11779        }
11780        this.mergeUnknownFields(other.getUnknownFields());
11781        return this;
11782      }
11783
11784      public final boolean isInitialized() {
11785        return true;
11786      }
11787
11788      public Builder mergeFrom(
11789          com.google.protobuf.CodedInputStream input,
11790          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11791          throws java.io.IOException {
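        // Parse into a temporary message first; if decoding fails partway,
        // the finally block still merges whatever was read before the
        // exception is rethrown, preserving protobuf's partial-merge behavior.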
11792        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parsedMessage = null;
11793        try {
11794          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
11795        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
11796          parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection) e.getUnfinishedMessage();
11797          throw e;
11798        } finally {
11799          if (parsedMessage != null) {
11800            mergeFrom(parsedMessage);
11801          }
11802        }
11803        return this;
11804      }
11805      private int bitField0_;
11806
11807      // optional uint64 lastInodeId = 1;
11808      private long lastInodeId_ ;
11809      /**
11810       * <code>optional uint64 lastInodeId = 1;</code>
11811       */
11812      public boolean hasLastInodeId() {
11813        return ((bitField0_ & 0x00000001) == 0x00000001);
11814      }
11815      /**
11816       * <code>optional uint64 lastInodeId = 1;</code>
11817       */
11818      public long getLastInodeId() {
11819        return lastInodeId_;
11820      }
11821      /**
11822       * <code>optional uint64 lastInodeId = 1;</code>
11823       */
11824      public Builder setLastInodeId(long value) {
11825        bitField0_ |= 0x00000001;
11826        lastInodeId_ = value;
11827        onChanged();
11828        return this;
11829      }
11830      /**
11831       * <code>optional uint64 lastInodeId = 1;</code>
11832       */
11833      public Builder clearLastInodeId() {
11834        bitField0_ = (bitField0_ & ~0x00000001);
11835        lastInodeId_ = 0L;
11836        onChanged();
11837        return this;
11838      }
11839
11840      // optional uint64 numInodes = 2;
11841      private long numInodes_ ;
11842      /**
11843       * <code>optional uint64 numInodes = 2;</code>
11844       *
11845       * <pre>
11846       * The number of INodes; the INode entries follow this header.
11847       * </pre>
11848       */
11849      public boolean hasNumInodes() {
11850        return ((bitField0_ & 0x00000002) == 0x00000002);
11851      }
11852      /**
11853       * <code>optional uint64 numInodes = 2;</code>
11854       *
11855       * <pre>
11856       * The number of INodes; the INode entries follow this header.
11857       * </pre>
11858       */
11859      public long getNumInodes() {
11860        return numInodes_;
11861      }
11862      /**
11863       * <code>optional uint64 numInodes = 2;</code>
11864       *
11865       * <pre>
11866       * The number of INodes; the INode entries follow this header.
11867       * </pre>
11868       */
11869      public Builder setNumInodes(long value) {
11870        bitField0_ |= 0x00000002;
11871        numInodes_ = value;
11872        onChanged();
11873        return this;
11874      }
11875      /**
11876       * <code>optional uint64 numInodes = 2;</code>
11877       *
11878       * <pre>
11879       * The number of INodes; the INode entries follow this header.
11880       * </pre>
11881       */
11882      public Builder clearNumInodes() {
11883        bitField0_ = (bitField0_ & ~0x00000002);
11884        numInodes_ = 0L;
11885        onChanged();
11886        return this;
11887      }
11888
11889      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection)
11890    }
11891
11892    static {
11893      defaultInstance = new INodeSection(true);
11894      defaultInstance.initFields();
11895    }
11896
11897    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection)
11898  }
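
  /*
   * Illustrative usage sketch (not generated output; values are made up):
   * building and round-tripping the INodeSection header, then decoding the
   * 64-bit permission word described in the section comment above.
   *
   *   INodeSection section = INodeSection.newBuilder()
   *       .setLastInodeId(16400L)
   *       .setNumInodes(3L)
   *       .build();
   *   INodeSection parsed = INodeSection.parseFrom(section.toByteArray());
   *
   *   long perm = ...;                                // taken from an INode
   *   int userId  = (int) (perm >>> 40);              // bits [0:24)
   *   int groupId = (int) ((perm >>> 16) & 0xFFFFFF); // bits [24:48)
   *   int mode    = (int) (perm & 0xFFFF);            // bits [48:64)
   */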
11899
11900  public interface FilesUnderConstructionSectionOrBuilder
11901      extends com.google.protobuf.MessageOrBuilder {
11902  }
11903  /**
11904   * Protobuf type {@code hadoop.hdfs.fsimage.FilesUnderConstructionSection}
11905   *
11906   * <pre>
11907   **
11908   * This section records information about under-construction files for
11909   * reconstructing the lease map.
11910   * NAME: FILES_UNDERCONSTRUCTION
11911   * </pre>
11912   */
11913  public static final class FilesUnderConstructionSection extends
11914      com.google.protobuf.GeneratedMessage
11915      implements FilesUnderConstructionSectionOrBuilder {
11916    // Use FilesUnderConstructionSection.newBuilder() to construct.
11917    private FilesUnderConstructionSection(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
11918      super(builder);
11919      this.unknownFields = builder.getUnknownFields();
11920    }
11921    private FilesUnderConstructionSection(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
11922
11923    private static final FilesUnderConstructionSection defaultInstance;
11924    public static FilesUnderConstructionSection getDefaultInstance() {
11925      return defaultInstance;
11926    }
11927
11928    public FilesUnderConstructionSection getDefaultInstanceForType() {
11929      return defaultInstance;
11930    }
11931
11932    private final com.google.protobuf.UnknownFieldSet unknownFields;
11933    @java.lang.Override
11934    public final com.google.protobuf.UnknownFieldSet
11935        getUnknownFields() {
11936      return this.unknownFields;
11937    }
11938    private FilesUnderConstructionSection(
11939        com.google.protobuf.CodedInputStream input,
11940        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11941        throws com.google.protobuf.InvalidProtocolBufferException {
11942      initFields();
11943      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
11944          com.google.protobuf.UnknownFieldSet.newBuilder();
11945      try {
11946        boolean done = false;
11947        while (!done) {
11948          int tag = input.readTag();
11949          switch (tag) {
11950            case 0:
11951              done = true;
11952              break;
11953            default: {
11954              if (!parseUnknownField(input, unknownFields,
11955                                     extensionRegistry, tag)) {
11956                done = true;
11957              }
11958              break;
11959            }
11960          }
11961        }
11962      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
11963        throw e.setUnfinishedMessage(this);
11964      } catch (java.io.IOException e) {
11965        throw new com.google.protobuf.InvalidProtocolBufferException(
11966            e.getMessage()).setUnfinishedMessage(this);
11967      } finally {
11968        this.unknownFields = unknownFields.build();
11969        makeExtensionsImmutable();
11970      }
11971    }
11972    public static final com.google.protobuf.Descriptors.Descriptor
11973        getDescriptor() {
11974      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_descriptor;
11975    }
11976
11977    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
11978        internalGetFieldAccessorTable() {
11979      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_fieldAccessorTable
11980          .ensureFieldAccessorsInitialized(
11981              org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.Builder.class);
11982    }
11983
11984    public static com.google.protobuf.Parser<FilesUnderConstructionSection> PARSER =
11985        new com.google.protobuf.AbstractParser<FilesUnderConstructionSection>() {
11986      public FilesUnderConstructionSection parsePartialFrom(
11987          com.google.protobuf.CodedInputStream input,
11988          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11989          throws com.google.protobuf.InvalidProtocolBufferException {
11990        return new FilesUnderConstructionSection(input, extensionRegistry);
11991      }
11992    };
11993
11994    @java.lang.Override
11995    public com.google.protobuf.Parser<FilesUnderConstructionSection> getParserForType() {
11996      return PARSER;
11997    }
11998
11999    public interface FileUnderConstructionEntryOrBuilder
12000        extends com.google.protobuf.MessageOrBuilder {
12001
12002      // optional uint64 inodeId = 1;
12003      /**
12004       * <code>optional uint64 inodeId = 1;</code>
12005       */
12006      boolean hasInodeId();
12007      /**
12008       * <code>optional uint64 inodeId = 1;</code>
12009       */
12010      long getInodeId();
12011
12012      // optional string fullPath = 2;
12013      /**
12014       * <code>optional string fullPath = 2;</code>
12015       */
12016      boolean hasFullPath();
12017      /**
12018       * <code>optional string fullPath = 2;</code>
12019       */
12020      java.lang.String getFullPath();
12021      /**
12022       * <code>optional string fullPath = 2;</code>
12023       */
12024      com.google.protobuf.ByteString
12025          getFullPathBytes();
12026    }
12027    /**
12028     * Protobuf type {@code hadoop.hdfs.fsimage.FilesUnderConstructionSection.FileUnderConstructionEntry}
12029     */
12030    public static final class FileUnderConstructionEntry extends
12031        com.google.protobuf.GeneratedMessage
12032        implements FileUnderConstructionEntryOrBuilder {
12033      // Use FileUnderConstructionEntry.newBuilder() to construct.
12034      private FileUnderConstructionEntry(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
12035        super(builder);
12036        this.unknownFields = builder.getUnknownFields();
12037      }
12038      private FileUnderConstructionEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
12039
12040      private static final FileUnderConstructionEntry defaultInstance;
12041      public static FileUnderConstructionEntry getDefaultInstance() {
12042        return defaultInstance;
12043      }
12044
12045      public FileUnderConstructionEntry getDefaultInstanceForType() {
12046        return defaultInstance;
12047      }
12048
12049      private final com.google.protobuf.UnknownFieldSet unknownFields;
12050      @java.lang.Override
12051      public final com.google.protobuf.UnknownFieldSet
12052          getUnknownFields() {
12053        return this.unknownFields;
12054      }
12055      private FileUnderConstructionEntry(
12056          com.google.protobuf.CodedInputStream input,
12057          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12058          throws com.google.protobuf.InvalidProtocolBufferException {
12059        initFields();
12060        int mutable_bitField0_ = 0;
12061        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
12062            com.google.protobuf.UnknownFieldSet.newBuilder();
12063        try {
12064          boolean done = false;
12065          while (!done) {
12066            int tag = input.readTag();
12067            switch (tag) {
12068              case 0:
12069                done = true;
12070                break;
12071              default: {
12072                if (!parseUnknownField(input, unknownFields,
12073                                       extensionRegistry, tag)) {
12074                  done = true;
12075                }
12076                break;
12077              }
12078              case 8: {
12079                bitField0_ |= 0x00000001;
12080                inodeId_ = input.readUInt64();
12081                break;
12082              }
12083              case 18: {
12084                bitField0_ |= 0x00000002;
12085                fullPath_ = input.readBytes();
12086                break;
12087              }
12088            }
12089          }
12090        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
12091          throw e.setUnfinishedMessage(this);
12092        } catch (java.io.IOException e) {
12093          throw new com.google.protobuf.InvalidProtocolBufferException(
12094              e.getMessage()).setUnfinishedMessage(this);
12095        } finally {
12096          this.unknownFields = unknownFields.build();
12097          makeExtensionsImmutable();
12098        }
12099      }
12100      public static final com.google.protobuf.Descriptors.Descriptor
12101          getDescriptor() {
12102        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_descriptor;
12103      }
12104
12105      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
12106          internalGetFieldAccessorTable() {
12107        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_fieldAccessorTable
12108            .ensureFieldAccessorsInitialized(
12109                org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.Builder.class);
12110      }
12111
12112      public static com.google.protobuf.Parser<FileUnderConstructionEntry> PARSER =
12113          new com.google.protobuf.AbstractParser<FileUnderConstructionEntry>() {
12114        public FileUnderConstructionEntry parsePartialFrom(
12115            com.google.protobuf.CodedInputStream input,
12116            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12117            throws com.google.protobuf.InvalidProtocolBufferException {
12118          return new FileUnderConstructionEntry(input, extensionRegistry);
12119        }
12120      };
12121
12122      @java.lang.Override
12123      public com.google.protobuf.Parser<FileUnderConstructionEntry> getParserForType() {
12124        return PARSER;
12125      }
12126
12127      private int bitField0_;
12128      // optional uint64 inodeId = 1;
12129      public static final int INODEID_FIELD_NUMBER = 1;
12130      private long inodeId_;
12131      /**
12132       * <code>optional uint64 inodeId = 1;</code>
12133       */
12134      public boolean hasInodeId() {
12135        return ((bitField0_ & 0x00000001) == 0x00000001);
12136      }
12137      /**
12138       * <code>optional uint64 inodeId = 1;</code>
12139       */
12140      public long getInodeId() {
12141        return inodeId_;
12142      }
12143
12144      // optional string fullPath = 2;
12145      public static final int FULLPATH_FIELD_NUMBER = 2;
12146      private java.lang.Object fullPath_;
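      // fullPath_ holds either a String or the raw ByteString off the wire;
      // getFullPath() decodes lazily and caches the String only when the
      // bytes are valid UTF-8, so a malformed value is never cached as text.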
12147      /**
12148       * <code>optional string fullPath = 2;</code>
12149       */
12150      public boolean hasFullPath() {
12151        return ((bitField0_ & 0x00000002) == 0x00000002);
12152      }
12153      /**
12154       * <code>optional string fullPath = 2;</code>
12155       */
12156      public java.lang.String getFullPath() {
12157        java.lang.Object ref = fullPath_;
12158        if (ref instanceof java.lang.String) {
12159          return (java.lang.String) ref;
12160        } else {
12161          com.google.protobuf.ByteString bs = 
12162              (com.google.protobuf.ByteString) ref;
12163          java.lang.String s = bs.toStringUtf8();
12164          if (bs.isValidUtf8()) {
12165            fullPath_ = s;
12166          }
12167          return s;
12168        }
12169      }
12170      /**
12171       * <code>optional string fullPath = 2;</code>
12172       */
12173      public com.google.protobuf.ByteString
12174          getFullPathBytes() {
12175        java.lang.Object ref = fullPath_;
12176        if (ref instanceof java.lang.String) {
12177          com.google.protobuf.ByteString b = 
12178              com.google.protobuf.ByteString.copyFromUtf8(
12179                  (java.lang.String) ref);
12180          fullPath_ = b;
12181          return b;
12182        } else {
12183          return (com.google.protobuf.ByteString) ref;
12184        }
12185      }
12186
12187      private void initFields() {
12188        inodeId_ = 0L;
12189        fullPath_ = "";
12190      }
12191      private byte memoizedIsInitialized = -1;
12192      public final boolean isInitialized() {
12193        byte isInitialized = memoizedIsInitialized;
12194        if (isInitialized != -1) return isInitialized == 1;
12195
12196        memoizedIsInitialized = 1;
12197        return true;
12198      }
12199
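      // writeTo() invokes getSerializedSize() first to prime the memoized
      // size; that is a no-op for this flat message, but the generated
      // pattern is uniform so nested messages cache sizes before writing.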
12200      public void writeTo(com.google.protobuf.CodedOutputStream output)
12201                          throws java.io.IOException {
12202        getSerializedSize();
12203        if (((bitField0_ & 0x00000001) == 0x00000001)) {
12204          output.writeUInt64(1, inodeId_);
12205        }
12206        if (((bitField0_ & 0x00000002) == 0x00000002)) {
12207          output.writeBytes(2, getFullPathBytes());
12208        }
12209        getUnknownFields().writeTo(output);
12210      }
12211
12212      private int memoizedSerializedSize = -1;
12213      public int getSerializedSize() {
12214        int size = memoizedSerializedSize;
12215        if (size != -1) return size;
12216
12217        size = 0;
12218        if (((bitField0_ & 0x00000001) == 0x00000001)) {
12219          size += com.google.protobuf.CodedOutputStream
12220            .computeUInt64Size(1, inodeId_);
12221        }
12222        if (((bitField0_ & 0x00000002) == 0x00000002)) {
12223          size += com.google.protobuf.CodedOutputStream
12224            .computeBytesSize(2, getFullPathBytes());
12225        }
12226        size += getUnknownFields().getSerializedSize();
12227        memoizedSerializedSize = size;
12228        return size;
12229      }
12230
12231      private static final long serialVersionUID = 0L;
12232      @java.lang.Override
12233      protected java.lang.Object writeReplace()
12234          throws java.io.ObjectStreamException {
12235        return super.writeReplace();
12236      }
12237
12238      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom(
12239          com.google.protobuf.ByteString data)
12240          throws com.google.protobuf.InvalidProtocolBufferException {
12241        return PARSER.parseFrom(data);
12242      }
12243      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom(
12244          com.google.protobuf.ByteString data,
12245          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12246          throws com.google.protobuf.InvalidProtocolBufferException {
12247        return PARSER.parseFrom(data, extensionRegistry);
12248      }
12249      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom(byte[] data)
12250          throws com.google.protobuf.InvalidProtocolBufferException {
12251        return PARSER.parseFrom(data);
12252      }
12253      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom(
12254          byte[] data,
12255          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12256          throws com.google.protobuf.InvalidProtocolBufferException {
12257        return PARSER.parseFrom(data, extensionRegistry);
12258      }
12259      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom(java.io.InputStream input)
12260          throws java.io.IOException {
12261        return PARSER.parseFrom(input);
12262      }
12263      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom(
12264          java.io.InputStream input,
12265          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12266          throws java.io.IOException {
12267        return PARSER.parseFrom(input, extensionRegistry);
12268      }
12269      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseDelimitedFrom(java.io.InputStream input)
12270          throws java.io.IOException {
12271        return PARSER.parseDelimitedFrom(input);
12272      }
12273      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseDelimitedFrom(
12274          java.io.InputStream input,
12275          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12276          throws java.io.IOException {
12277        return PARSER.parseDelimitedFrom(input, extensionRegistry);
12278      }
12279      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom(
12280          com.google.protobuf.CodedInputStream input)
12281          throws java.io.IOException {
12282        return PARSER.parseFrom(input);
12283      }
12284      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom(
12285          com.google.protobuf.CodedInputStream input,
12286          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12287          throws java.io.IOException {
12288        return PARSER.parseFrom(input, extensionRegistry);
12289      }
12290
12291      public static Builder newBuilder() { return Builder.create(); }
12292      public Builder newBuilderForType() { return newBuilder(); }
12293      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry prototype) {
12294        return newBuilder().mergeFrom(prototype);
12295      }
12296      public Builder toBuilder() { return newBuilder(this); }
12297
12298      @java.lang.Override
12299      protected Builder newBuilderForType(
12300          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
12301        Builder builder = new Builder(parent);
12302        return builder;
12303      }
12304      /**
12305       * Protobuf type {@code hadoop.hdfs.fsimage.FilesUnderConstructionSection.FileUnderConstructionEntry}
12306       */
12307      public static final class Builder extends
12308          com.google.protobuf.GeneratedMessage.Builder<Builder>
12309         implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntryOrBuilder {
12310        public static final com.google.protobuf.Descriptors.Descriptor
12311            getDescriptor() {
12312          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_descriptor;
12313        }
12314
12315        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
12316            internalGetFieldAccessorTable() {
12317          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_fieldAccessorTable
12318              .ensureFieldAccessorsInitialized(
12319                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.Builder.class);
12320        }
12321
12322        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.newBuilder()
12323        private Builder() {
12324          maybeForceBuilderInitialization();
12325        }
12326
12327        private Builder(
12328            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
12329          super(parent);
12330          maybeForceBuilderInitialization();
12331        }
12332        private void maybeForceBuilderInitialization() {
12333          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
12334          }
12335        }
12336        private static Builder create() {
12337          return new Builder();
12338        }
12339
12340        public Builder clear() {
12341          super.clear();
12342          inodeId_ = 0L;
12343          bitField0_ = (bitField0_ & ~0x00000001);
12344          fullPath_ = "";
12345          bitField0_ = (bitField0_ & ~0x00000002);
12346          return this;
12347        }
12348
12349        public Builder clone() {
12350          return create().mergeFrom(buildPartial());
12351        }
12352
12353        public com.google.protobuf.Descriptors.Descriptor
12354            getDescriptorForType() {
12355          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_descriptor;
12356        }
12357
12358        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry getDefaultInstanceForType() {
12359          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.getDefaultInstance();
12360        }
12361
12362        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry build() {
12363          org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry result = buildPartial();
12364          if (!result.isInitialized()) {
12365            throw newUninitializedMessageException(result);
12366          }
12367          return result;
12368        }
12369
12370        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry buildPartial() {
12371          org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry(this);
12372          int from_bitField0_ = bitField0_;
12373          int to_bitField0_ = 0;
12374          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
12375            to_bitField0_ |= 0x00000001;
12376          }
12377          result.inodeId_ = inodeId_;
12378          if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
12379            to_bitField0_ |= 0x00000002;
12380          }
12381          result.fullPath_ = fullPath_;
12382          result.bitField0_ = to_bitField0_;
12383          onBuilt();
12384          return result;
12385        }
12386
12387        public Builder mergeFrom(com.google.protobuf.Message other) {
12388          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry) {
12389            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry)other);
12390          } else {
12391            super.mergeFrom(other);
12392            return this;
12393          }
12394        }
12395
12396        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry other) {
12397          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.getDefaultInstance()) return this;
12398          if (other.hasInodeId()) {
12399            setInodeId(other.getInodeId());
12400          }
12401          if (other.hasFullPath()) {
12402            bitField0_ |= 0x00000002;
12403            fullPath_ = other.fullPath_;
12404            onChanged();
12405          }
12406          this.mergeUnknownFields(other.getUnknownFields());
12407          return this;
12408        }
12409
12410        public final boolean isInitialized() {
12411          return true;
12412        }
12413
12414        public Builder mergeFrom(
12415            com.google.protobuf.CodedInputStream input,
12416            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12417            throws java.io.IOException {
12418          org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parsedMessage = null;
12419          try {
12420            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
12421          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
12422            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry) e.getUnfinishedMessage();
12423            throw e;
12424          } finally {
12425            if (parsedMessage != null) {
12426              mergeFrom(parsedMessage);
12427            }
12428          }
12429          return this;
12430        }
12431        private int bitField0_;
12432
12433        // optional uint64 inodeId = 1;
12434        private long inodeId_ ;
12435        /**
12436         * <code>optional uint64 inodeId = 1;</code>
12437         */
12438        public boolean hasInodeId() {
12439          return ((bitField0_ & 0x00000001) == 0x00000001);
12440        }
12441        /**
12442         * <code>optional uint64 inodeId = 1;</code>
12443         */
12444        public long getInodeId() {
12445          return inodeId_;
12446        }
12447        /**
12448         * <code>optional uint64 inodeId = 1;</code>
12449         */
12450        public Builder setInodeId(long value) {
12451          bitField0_ |= 0x00000001;
12452          inodeId_ = value;
12453          onChanged();
12454          return this;
12455        }
12456        /**
12457         * <code>optional uint64 inodeId = 1;</code>
12458         */
12459        public Builder clearInodeId() {
12460          bitField0_ = (bitField0_ & ~0x00000001);
12461          inodeId_ = 0L;
12462          onChanged();
12463          return this;
12464        }
12465
12466        // optional string fullPath = 2;
12467        private java.lang.Object fullPath_ = "";
12468        /**
12469         * <code>optional string fullPath = 2;</code>
12470         */
12471        public boolean hasFullPath() {
12472          return ((bitField0_ & 0x00000002) == 0x00000002);
12473        }
12474        /**
12475         * <code>optional string fullPath = 2;</code>
12476         */
12477        public java.lang.String getFullPath() {
12478          java.lang.Object ref = fullPath_;
12479          if (!(ref instanceof java.lang.String)) {
12480            java.lang.String s = ((com.google.protobuf.ByteString) ref)
12481                .toStringUtf8();
12482            fullPath_ = s;
12483            return s;
12484          } else {
12485            return (java.lang.String) ref;
12486          }
12487        }
12488        /**
12489         * <code>optional string fullPath = 2;</code>
12490         */
12491        public com.google.protobuf.ByteString
12492            getFullPathBytes() {
12493          java.lang.Object ref = fullPath_;
12494          if (ref instanceof java.lang.String) {
12495            com.google.protobuf.ByteString b = 
12496                com.google.protobuf.ByteString.copyFromUtf8(
12497                    (java.lang.String) ref);
12498            fullPath_ = b;
12499            return b;
12500          } else {
12501            return (com.google.protobuf.ByteString) ref;
12502          }
12503        }
12504        /**
12505         * <code>optional string fullPath = 2;</code>
12506         */
12507        public Builder setFullPath(
12508            java.lang.String value) {
12509          if (value == null) {
12510            throw new NullPointerException();
12511          }
12512          bitField0_ |= 0x00000002;
12513          fullPath_ = value;
12514          onChanged();
12515          return this;
12516        }
12517        /**
12518         * <code>optional string fullPath = 2;</code>
12519         */
12520        public Builder clearFullPath() {
12521          bitField0_ = (bitField0_ & ~0x00000002);
12522          fullPath_ = getDefaultInstance().getFullPath();
12523          onChanged();
12524          return this;
12525        }
12526        /**
12527         * <code>optional string fullPath = 2;</code>
12528         */
12529        public Builder setFullPathBytes(
12530            com.google.protobuf.ByteString value) {
12531          if (value == null) {
12532            throw new NullPointerException();
12533          }
12534          bitField0_ |= 0x00000002;
12535          fullPath_ = value;
12536          onChanged();
12537          return this;
12538        }
12539
12540        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.FilesUnderConstructionSection.FileUnderConstructionEntry)
12541      }
12542
12543      static {
12544        defaultInstance = new FileUnderConstructionEntry(true);
12545        defaultInstance.initFields();
12546      }
12547
12548      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.FilesUnderConstructionSection.FileUnderConstructionEntry)
12549    }
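
    /*
     * Illustrative sketch (assumed usage, not generated output; the id and
     * path are made up): one delimited entry is written per open file after
     * the section header, and can be read back the same way.
     *
     *   FileUnderConstructionEntry entry = FileUnderConstructionEntry
     *       .newBuilder()
     *       .setInodeId(16402L)
     *       .setFullPath("/user/alice/part-0000._COPYING_")
     *       .build();
     *   entry.writeDelimitedTo(out);
     *   FileUnderConstructionEntry back =
     *       FileUnderConstructionEntry.parseDelimitedFrom(in);
     */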
12550
12551    private void initFields() {
12552    }
12553    private byte memoizedIsInitialized = -1;
12554    public final boolean isInitialized() {
12555      byte isInitialized = memoizedIsInitialized;
12556      if (isInitialized != -1) return isInitialized == 1;
12557
12558      memoizedIsInitialized = 1;
12559      return true;
12560    }
12561
12562    public void writeTo(com.google.protobuf.CodedOutputStream output)
12563                        throws java.io.IOException {
12564      getSerializedSize();
12565      getUnknownFields().writeTo(output);
12566    }
12567
12568    private int memoizedSerializedSize = -1;
12569    public int getSerializedSize() {
12570      int size = memoizedSerializedSize;
12571      if (size != -1) return size;
12572
12573      size = 0;
12574      size += getUnknownFields().getSerializedSize();
12575      memoizedSerializedSize = size;
12576      return size;
12577    }
12578
12579    private static final long serialVersionUID = 0L;
12580    @java.lang.Override
12581    protected java.lang.Object writeReplace()
12582        throws java.io.ObjectStreamException {
12583      return super.writeReplace();
12584    }
12585
12586    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom(
12587        com.google.protobuf.ByteString data)
12588        throws com.google.protobuf.InvalidProtocolBufferException {
12589      return PARSER.parseFrom(data);
12590    }
12591    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom(
12592        com.google.protobuf.ByteString data,
12593        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12594        throws com.google.protobuf.InvalidProtocolBufferException {
12595      return PARSER.parseFrom(data, extensionRegistry);
12596    }
12597    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom(byte[] data)
12598        throws com.google.protobuf.InvalidProtocolBufferException {
12599      return PARSER.parseFrom(data);
12600    }
12601    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom(
12602        byte[] data,
12603        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12604        throws com.google.protobuf.InvalidProtocolBufferException {
12605      return PARSER.parseFrom(data, extensionRegistry);
12606    }
12607    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom(java.io.InputStream input)
12608        throws java.io.IOException {
12609      return PARSER.parseFrom(input);
12610    }
12611    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom(
12612        java.io.InputStream input,
12613        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12614        throws java.io.IOException {
12615      return PARSER.parseFrom(input, extensionRegistry);
12616    }
12617    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseDelimitedFrom(java.io.InputStream input)
12618        throws java.io.IOException {
12619      return PARSER.parseDelimitedFrom(input);
12620    }
12621    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseDelimitedFrom(
12622        java.io.InputStream input,
12623        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12624        throws java.io.IOException {
12625      return PARSER.parseDelimitedFrom(input, extensionRegistry);
12626    }
12627    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom(
12628        com.google.protobuf.CodedInputStream input)
12629        throws java.io.IOException {
12630      return PARSER.parseFrom(input);
12631    }
12632    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom(
12633        com.google.protobuf.CodedInputStream input,
12634        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12635        throws java.io.IOException {
12636      return PARSER.parseFrom(input, extensionRegistry);
12637    }
12638
12639    public static Builder newBuilder() { return Builder.create(); }
12640    public Builder newBuilderForType() { return newBuilder(); }
12641    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection prototype) {
12642      return newBuilder().mergeFrom(prototype);
12643    }
12644    public Builder toBuilder() { return newBuilder(this); }
12645
12646    @java.lang.Override
12647    protected Builder newBuilderForType(
12648        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
12649      Builder builder = new Builder(parent);
12650      return builder;
12651    }
12652    /**
12653     * Protobuf type {@code hadoop.hdfs.fsimage.FilesUnderConstructionSection}
12654     *
12655     * <pre>
12656     **
12657     * This section records information about under-construction files for
12658     * reconstructing the lease map.
12659     * NAME: FILES_UNDERCONSTRUCTION
12660     * </pre>
12661     */
12662    public static final class Builder extends
12663        com.google.protobuf.GeneratedMessage.Builder<Builder>
12664       implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSectionOrBuilder {
12665      public static final com.google.protobuf.Descriptors.Descriptor
12666          getDescriptor() {
12667        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_descriptor;
12668      }
12669
12670      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
12671          internalGetFieldAccessorTable() {
12672        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_fieldAccessorTable
12673            .ensureFieldAccessorsInitialized(
12674                org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.Builder.class);
12675      }
12676
12677      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.newBuilder()
12678      private Builder() {
12679        maybeForceBuilderInitialization();
12680      }
12681
12682      private Builder(
12683          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
12684        super(parent);
12685        maybeForceBuilderInitialization();
12686      }
12687      private void maybeForceBuilderInitialization() {
12688        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
12689        }
12690      }
12691      private static Builder create() {
12692        return new Builder();
12693      }
12694
12695      public Builder clear() {
12696        super.clear();
12697        return this;
12698      }
12699
12700      public Builder clone() {
12701        return create().mergeFrom(buildPartial());
12702      }
12703
12704      public com.google.protobuf.Descriptors.Descriptor
12705          getDescriptorForType() {
12706        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_descriptor;
12707      }
12708
12709      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection getDefaultInstanceForType() {
12710        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.getDefaultInstance();
12711      }
12712
12713      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection build() {
12714        org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection result = buildPartial();
12715        if (!result.isInitialized()) {
12716          throw newUninitializedMessageException(result);
12717        }
12718        return result;
12719      }
12720
12721      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection buildPartial() {
12722        org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection(this);
12723        onBuilt();
12724        return result;
12725      }
12726
12727      public Builder mergeFrom(com.google.protobuf.Message other) {
12728        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection) {
12729          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection)other);
12730        } else {
12731          super.mergeFrom(other);
12732          return this;
12733        }
12734      }
12735
12736      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection other) {
12737        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.getDefaultInstance()) return this;
12738        this.mergeUnknownFields(other.getUnknownFields());
12739        return this;
12740      }
12741
12742      public final boolean isInitialized() {
12743        return true;
12744      }
12745
12746      public Builder mergeFrom(
12747          com.google.protobuf.CodedInputStream input,
12748          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12749          throws java.io.IOException {
12750        org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parsedMessage = null;
12751        try {
12752          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
12753        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
12754          parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection) e.getUnfinishedMessage();
12755          throw e;
12756        } finally {
12757          if (parsedMessage != null) {
12758            mergeFrom(parsedMessage);
12759          }
12760        }
12761        return this;
12762      }
12763
12764      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.FilesUnderConstructionSection)
12765    }
12766
12767    static {
12768      defaultInstance = new FilesUnderConstructionSection(true);
12769      defaultInstance.initFields();
12770    }
12771
12772    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.FilesUnderConstructionSection)
12773  }
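
  /*
   * Sketch of a reader (assumed layout, not generated output): the section
   * message itself carries no fields, so rebuilding the lease map means
   * parsing the delimited entries that follow it; leaseMap is hypothetical.
   *
   *   FilesUnderConstructionSection.FileUnderConstructionEntry e;
   *   while ((e = FilesUnderConstructionSection.FileUnderConstructionEntry
   *       .parseDelimitedFrom(in)) != null) {      // null at end of stream
   *     leaseMap.put(e.getFullPath(), e.getInodeId());
   *   }
   */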
12774
12775  public interface INodeDirectorySectionOrBuilder
12776      extends com.google.protobuf.MessageOrBuilder {
12777  }
12778  /**
12779   * Protobuf type {@code hadoop.hdfs.fsimage.INodeDirectorySection}
12780   *
12781   * <pre>
12782   **
12783   * This section records the children of each directory
12784   * NAME: INODE_DIR
12785   * </pre>
12786   */
12787  public static final class INodeDirectorySection extends
12788      com.google.protobuf.GeneratedMessage
12789      implements INodeDirectorySectionOrBuilder {
12790    // Use INodeDirectorySection.newBuilder() to construct.
12791    private INodeDirectorySection(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
12792      super(builder);
12793      this.unknownFields = builder.getUnknownFields();
12794    }
12795    private INodeDirectorySection(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
12796
12797    private static final INodeDirectorySection defaultInstance;
12798    public static INodeDirectorySection getDefaultInstance() {
12799      return defaultInstance;
12800    }
12801
12802    public INodeDirectorySection getDefaultInstanceForType() {
12803      return defaultInstance;
12804    }
12805
12806    private final com.google.protobuf.UnknownFieldSet unknownFields;
12807    @java.lang.Override
12808    public final com.google.protobuf.UnknownFieldSet
12809        getUnknownFields() {
12810      return this.unknownFields;
12811    }
12812    private INodeDirectorySection(
12813        com.google.protobuf.CodedInputStream input,
12814        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12815        throws com.google.protobuf.InvalidProtocolBufferException {
12816      initFields();
12817      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
12818          com.google.protobuf.UnknownFieldSet.newBuilder();
12819      try {
12820        boolean done = false;
12821        while (!done) {
12822          int tag = input.readTag();
12823          switch (tag) {
12824            case 0:
12825              done = true;
12826              break;
12827            default: {
12828              if (!parseUnknownField(input, unknownFields,
12829                                     extensionRegistry, tag)) {
12830                done = true;
12831              }
12832              break;
12833            }
12834          }
12835        }
12836      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
12837        throw e.setUnfinishedMessage(this);
12838      } catch (java.io.IOException e) {
12839        throw new com.google.protobuf.InvalidProtocolBufferException(
12840            e.getMessage()).setUnfinishedMessage(this);
12841      } finally {
12842        this.unknownFields = unknownFields.build();
12843        makeExtensionsImmutable();
12844      }
12845    }
12846    public static final com.google.protobuf.Descriptors.Descriptor
12847        getDescriptor() {
12848      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_descriptor;
12849    }
12850
12851    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
12852        internalGetFieldAccessorTable() {
12853      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_fieldAccessorTable
12854          .ensureFieldAccessorsInitialized(
12855              org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.Builder.class);
12856    }
12857
12858    public static com.google.protobuf.Parser<INodeDirectorySection> PARSER =
12859        new com.google.protobuf.AbstractParser<INodeDirectorySection>() {
12860      public INodeDirectorySection parsePartialFrom(
12861          com.google.protobuf.CodedInputStream input,
12862          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12863          throws com.google.protobuf.InvalidProtocolBufferException {
12864        return new INodeDirectorySection(input, extensionRegistry);
12865      }
12866    };
12867
12868    @java.lang.Override
12869    public com.google.protobuf.Parser<INodeDirectorySection> getParserForType() {
12870      return PARSER;
12871    }
12872
12873    public interface DirEntryOrBuilder
12874        extends com.google.protobuf.MessageOrBuilder {
12875
12876      // optional uint64 parent = 1;
12877      /**
12878       * <code>optional uint64 parent = 1;</code>
12879       */
12880      boolean hasParent();
12881      /**
12882       * <code>optional uint64 parent = 1;</code>
12883       */
12884      long getParent();
12885
12886      // repeated uint64 children = 2 [packed = true];
12887      /**
12888       * <code>repeated uint64 children = 2 [packed = true];</code>
12889       *
12890       * <pre>
12891       * children that are not reference nodes
12892       * </pre>
12893       */
12894      java.util.List<java.lang.Long> getChildrenList();
12895      /**
12896       * <code>repeated uint64 children = 2 [packed = true];</code>
12897       *
12898       * <pre>
12899       * children that are not reference nodes
12900       * </pre>
12901       */
12902      int getChildrenCount();
12903      /**
12904       * <code>repeated uint64 children = 2 [packed = true];</code>
12905       *
12906       * <pre>
12907       * children that are not reference nodes
12908       * </pre>
12909       */
12910      long getChildren(int index);
12911
12912      // repeated uint32 refChildren = 3 [packed = true];
12913      /**
12914       * <code>repeated uint32 refChildren = 3 [packed = true];</code>
12915       *
12916       * <pre>
12917       * children that are reference nodes, each element is a reference node id
12918       * </pre>
12919       */
12920      java.util.List<java.lang.Integer> getRefChildrenList();
12921      /**
12922       * <code>repeated uint32 refChildren = 3 [packed = true];</code>
12923       *
12924       * <pre>
12925       * children that are reference nodes, each element is a reference node id
12926       * </pre>
12927       */
12928      int getRefChildrenCount();
12929      /**
12930       * <code>repeated uint32 refChildren = 3 [packed = true];</code>
12931       *
12932       * <pre>
12933       * children that are reference nodes, each element is a reference node id
12934       * </pre>
12935       */
12936      int getRefChildren(int index);
12937    }
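
    /*
     * Illustrative sketch (values made up): a DirEntry maps one parent inode
     * id to its children, and the packed varint lists keep large directories
     * compact, subject to the 64MB limit noted below.
     *
     *   DirEntry dir = DirEntry.newBuilder()
     *       .setParent(16385L)
     *       .addChildren(16386L)
     *       .addChildren(16387L)
     *       .addRefChildren(7)    // index of a reference node
     *       .build();
     */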
12938    /**
12939     * Protobuf type {@code hadoop.hdfs.fsimage.INodeDirectorySection.DirEntry}
12940     *
12941     * <pre>
12942     **
12943     * A single DirEntry needs to fit in the default PB max message size of
12944     * 64MB. Please be careful when adding more fields to a DirEntry!
12945     * </pre>
12946     */
12947    public static final class DirEntry extends
12948        com.google.protobuf.GeneratedMessage
12949        implements DirEntryOrBuilder {
12950      // Use DirEntry.newBuilder() to construct.
12951      private DirEntry(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
12952        super(builder);
12953        this.unknownFields = builder.getUnknownFields();
12954      }
12955      private DirEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
12956
12957      private static final DirEntry defaultInstance;
12958      public static DirEntry getDefaultInstance() {
12959        return defaultInstance;
12960      }
12961
12962      public DirEntry getDefaultInstanceForType() {
12963        return defaultInstance;
12964      }
12965
12966      private final com.google.protobuf.UnknownFieldSet unknownFields;
12967      @java.lang.Override
12968      public final com.google.protobuf.UnknownFieldSet
12969          getUnknownFields() {
12970        return this.unknownFields;
12971      }
12972      private DirEntry(
12973          com.google.protobuf.CodedInputStream input,
12974          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12975          throws com.google.protobuf.InvalidProtocolBufferException {
12976        initFields();
12977        int mutable_bitField0_ = 0;
12978        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
12979            com.google.protobuf.UnknownFieldSet.newBuilder();
12980        try {
12981          boolean done = false;
12982          while (!done) {
12983            int tag = input.readTag();
12984            switch (tag) {
12985              case 0:
12986                done = true;
12987                break;
12988              default: {
12989                if (!parseUnknownField(input, unknownFields,
12990                                       extensionRegistry, tag)) {
12991                  done = true;
12992                }
12993                break;
12994              }
12995              case 8: {
12996                bitField0_ |= 0x00000001;
12997                parent_ = input.readUInt64();
12998                break;
12999              }
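              // Field 2 (children) accepts both wire forms: tag 16 is the
              // unpacked one-varint-per-element encoding, tag 18 the packed
              // length-delimited encoding declared in the .proto.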
13000              case 16: {
13001                if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
13002                  children_ = new java.util.ArrayList<java.lang.Long>();
13003                  mutable_bitField0_ |= 0x00000002;
13004                }
13005                children_.add(input.readUInt64());
13006                break;
13007              }
13008              case 18: {
13009                int length = input.readRawVarint32();
13010                int limit = input.pushLimit(length);
13011                if (!((mutable_bitField0_ & 0x00000002) == 0x00000002) && input.getBytesUntilLimit() > 0) {
13012                  children_ = new java.util.ArrayList<java.lang.Long>();
13013                  mutable_bitField0_ |= 0x00000002;
13014                }
13015                while (input.getBytesUntilLimit() > 0) {
13016                  children_.add(input.readUInt64());
13017                }
13018                input.popLimit(limit);
13019                break;
13020              }
13021              case 24: {
13022                if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
13023                  refChildren_ = new java.util.ArrayList<java.lang.Integer>();
13024                  mutable_bitField0_ |= 0x00000004;
13025                }
13026                refChildren_.add(input.readUInt32());
13027                break;
13028              }
13029              case 26: {
13030                int length = input.readRawVarint32();
13031                int limit = input.pushLimit(length);
13032                if (!((mutable_bitField0_ & 0x00000004) == 0x00000004) && input.getBytesUntilLimit() > 0) {
13033                  refChildren_ = new java.util.ArrayList<java.lang.Integer>();
13034                  mutable_bitField0_ |= 0x00000004;
13035                }
13036                while (input.getBytesUntilLimit() > 0) {
13037                  refChildren_.add(input.readUInt32());
13038                }
13039                input.popLimit(limit);
13040                break;
13041              }
13042            }
13043          }
13044        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
13045          throw e.setUnfinishedMessage(this);
13046        } catch (java.io.IOException e) {
13047          throw new com.google.protobuf.InvalidProtocolBufferException(
13048              e.getMessage()).setUnfinishedMessage(this);
13049        } finally {
13050          if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
13051            children_ = java.util.Collections.unmodifiableList(children_);
13052          }
13053          if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
13054            refChildren_ = java.util.Collections.unmodifiableList(refChildren_);
13055          }
13056          this.unknownFields = unknownFields.build();
13057          makeExtensionsImmutable();
13058        }
13059      }
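      // Note on the switch above: a protobuf parser accepts both encodings of
      // a repeated scalar field regardless of [packed = true]. Tags 16 and 24
      // ((field << 3) | 0) read the unpacked form, one tagged varint per
      // element; tags 18 and 26 ((field << 3) | 2) read the packed form, a
      // single length-delimited block of untagged varints.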
13060      public static final com.google.protobuf.Descriptors.Descriptor
13061          getDescriptor() {
13062        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_descriptor;
13063      }
13064
13065      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
13066          internalGetFieldAccessorTable() {
13067        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_fieldAccessorTable
13068            .ensureFieldAccessorsInitialized(
13069                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry.Builder.class);
13070      }
13071
13072      public static com.google.protobuf.Parser<DirEntry> PARSER =
13073          new com.google.protobuf.AbstractParser<DirEntry>() {
13074        public DirEntry parsePartialFrom(
13075            com.google.protobuf.CodedInputStream input,
13076            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13077            throws com.google.protobuf.InvalidProtocolBufferException {
13078          return new DirEntry(input, extensionRegistry);
13079        }
13080      };
13081
13082      @java.lang.Override
13083      public com.google.protobuf.Parser<DirEntry> getParserForType() {
13084        return PARSER;
13085      }
13086
13087      private int bitField0_;
13088      // optional uint64 parent = 1;
13089      public static final int PARENT_FIELD_NUMBER = 1;
13090      private long parent_;
13091      /**
13092       * <code>optional uint64 parent = 1;</code>
13093       */
13094      public boolean hasParent() {
13095        return ((bitField0_ & 0x00000001) == 0x00000001);
13096      }
13097      /**
13098       * <code>optional uint64 parent = 1;</code>
13099       */
13100      public long getParent() {
13101        return parent_;
13102      }
13103
13104      // repeated uint64 children = 2 [packed = true];
13105      public static final int CHILDREN_FIELD_NUMBER = 2;
13106      private java.util.List<java.lang.Long> children_;
13107      /**
13108       * <code>repeated uint64 children = 2 [packed = true];</code>
13109       *
13110       * <pre>
13111       * children that are not reference nodes
13112       * </pre>
13113       */
13114      public java.util.List<java.lang.Long>
13115          getChildrenList() {
13116        return children_;
13117      }
13118      /**
13119       * <code>repeated uint64 children = 2 [packed = true];</code>
13120       *
13121       * <pre>
13122       * children that are not reference nodes
13123       * </pre>
13124       */
13125      public int getChildrenCount() {
13126        return children_.size();
13127      }
13128      /**
13129       * <code>repeated uint64 children = 2 [packed = true];</code>
13130       *
13131       * <pre>
13132       * children that are not reference nodes
13133       * </pre>
13134       */
13135      public long getChildren(int index) {
13136        return children_.get(index);
13137      }
13138      private int childrenMemoizedSerializedSize = -1;
13139
13140      // repeated uint32 refChildren = 3 [packed = true];
13141      public static final int REFCHILDREN_FIELD_NUMBER = 3;
13142      private java.util.List<java.lang.Integer> refChildren_;
13143      /**
13144       * <code>repeated uint32 refChildren = 3 [packed = true];</code>
13145       *
13146       * <pre>
13147       * children that are reference nodes; each element is a reference node id
13148       * </pre>
13149       */
13150      public java.util.List<java.lang.Integer>
13151          getRefChildrenList() {
13152        return refChildren_;
13153      }
13154      /**
13155       * <code>repeated uint32 refChildren = 3 [packed = true];</code>
13156       *
13157       * <pre>
13158       * children that are reference nodes; each element is a reference node id
13159       * </pre>
13160       */
13161      public int getRefChildrenCount() {
13162        return refChildren_.size();
13163      }
13164      /**
13165       * <code>repeated uint32 refChildren = 3 [packed = true];</code>
13166       *
13167       * <pre>
13168       * children that are reference nodes; each element is a reference node id
13169       * </pre>
13170       */
13171      public int getRefChildren(int index) {
13172        return refChildren_.get(index);
13173      }
13174      private int refChildrenMemoizedSerializedSize = -1;
13175
13176      private void initFields() {
13177        parent_ = 0L;
13178        children_ = java.util.Collections.emptyList();
13179        refChildren_ = java.util.Collections.emptyList();
13180      }
13181      private byte memoizedIsInitialized = -1;
13182      public final boolean isInitialized() {
13183        byte isInitialized = memoizedIsInitialized;
13184        if (isInitialized != -1) return isInitialized == 1;
13185
13186        memoizedIsInitialized = 1;
13187        return true;
13188      }
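      // Every DirEntry field is optional or repeated, so there are no
      // required-field checks and isInitialized() is trivially true.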
13189
13190      public void writeTo(com.google.protobuf.CodedOutputStream output)
13191                          throws java.io.IOException {
13192        getSerializedSize();
13193        if (((bitField0_ & 0x00000001) == 0x00000001)) {
13194          output.writeUInt64(1, parent_);
13195        }
13196        if (getChildrenList().size() > 0) {
13197          output.writeRawVarint32(18);
13198          output.writeRawVarint32(childrenMemoizedSerializedSize);
13199        }
13200        for (int i = 0; i < children_.size(); i++) {
13201          output.writeUInt64NoTag(children_.get(i));
13202        }
13203        if (getRefChildrenList().size() > 0) {
13204          output.writeRawVarint32(26);
13205          output.writeRawVarint32(refChildrenMemoizedSerializedSize);
13206        }
13207        for (int i = 0; i < refChildren_.size(); i++) {
13208          output.writeUInt32NoTag(refChildren_.get(i));
13209        }
13210        getUnknownFields().writeTo(output);
13211      }
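      // Wire-format note: the packed fields are written as one record each.
      // The raw varint 18 is the key for field 2 with wire type 2
      // ((2 << 3) | 2 = 0x12 = 18), and 26 is the key for field 3
      // ((3 << 3) | 2 = 0x1A = 26); each key is followed by the payload size
      // memoized by getSerializedSize() and then the untagged elements.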
13212
13213      private int memoizedSerializedSize = -1;
13214      public int getSerializedSize() {
13215        int size = memoizedSerializedSize;
13216        if (size != -1) return size;
13217
13218        size = 0;
13219        if (((bitField0_ & 0x00000001) == 0x00000001)) {
13220          size += com.google.protobuf.CodedOutputStream
13221            .computeUInt64Size(1, parent_);
13222        }
13223        {
13224          int dataSize = 0;
13225          for (int i = 0; i < children_.size(); i++) {
13226            dataSize += com.google.protobuf.CodedOutputStream
13227              .computeUInt64SizeNoTag(children_.get(i));
13228          }
13229          size += dataSize;
13230          if (!getChildrenList().isEmpty()) {
13231            size += 1;
13232            size += com.google.protobuf.CodedOutputStream
13233                .computeInt32SizeNoTag(dataSize);
13234          }
13235          childrenMemoizedSerializedSize = dataSize;
13236        }
13237        {
13238          int dataSize = 0;
13239          for (int i = 0; i < refChildren_.size(); i++) {
13240            dataSize += com.google.protobuf.CodedOutputStream
13241              .computeUInt32SizeNoTag(refChildren_.get(i));
13242          }
13243          size += dataSize;
13244          if (!getRefChildrenList().isEmpty()) {
13245            size += 1;
13246            size += com.google.protobuf.CodedOutputStream
13247                .computeInt32SizeNoTag(dataSize);
13248          }
13249          refChildrenMemoizedSerializedSize = dataSize;
13250        }
13251        size += getUnknownFields().getSerializedSize();
13252        memoizedSerializedSize = size;
13253        return size;
13254      }
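      // writeTo() relies on this method running first: besides memoizing the
      // total size, it caches childrenMemoizedSerializedSize and
      // refChildrenMemoizedSerializedSize, which writeTo() re-emits as the
      // packed-field length prefixes.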
13255
13256      private static final long serialVersionUID = 0L;
13257      @java.lang.Override
13258      protected java.lang.Object writeReplace()
13259          throws java.io.ObjectStreamException {
13260        return super.writeReplace();
13261      }
13262
13263      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom(
13264          com.google.protobuf.ByteString data)
13265          throws com.google.protobuf.InvalidProtocolBufferException {
13266        return PARSER.parseFrom(data);
13267      }
13268      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom(
13269          com.google.protobuf.ByteString data,
13270          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13271          throws com.google.protobuf.InvalidProtocolBufferException {
13272        return PARSER.parseFrom(data, extensionRegistry);
13273      }
13274      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom(byte[] data)
13275          throws com.google.protobuf.InvalidProtocolBufferException {
13276        return PARSER.parseFrom(data);
13277      }
13278      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom(
13279          byte[] data,
13280          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13281          throws com.google.protobuf.InvalidProtocolBufferException {
13282        return PARSER.parseFrom(data, extensionRegistry);
13283      }
13284      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom(java.io.InputStream input)
13285          throws java.io.IOException {
13286        return PARSER.parseFrom(input);
13287      }
13288      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom(
13289          java.io.InputStream input,
13290          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13291          throws java.io.IOException {
13292        return PARSER.parseFrom(input, extensionRegistry);
13293      }
13294      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseDelimitedFrom(java.io.InputStream input)
13295          throws java.io.IOException {
13296        return PARSER.parseDelimitedFrom(input);
13297      }
13298      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseDelimitedFrom(
13299          java.io.InputStream input,
13300          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13301          throws java.io.IOException {
13302        return PARSER.parseDelimitedFrom(input, extensionRegistry);
13303      }
13304      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom(
13305          com.google.protobuf.CodedInputStream input)
13306          throws java.io.IOException {
13307        return PARSER.parseFrom(input);
13308      }
13309      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom(
13310          com.google.protobuf.CodedInputStream input,
13311          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13312          throws java.io.IOException {
13313        return PARSER.parseFrom(input, extensionRegistry);
13314      }
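      // Usage sketch (illustrative only; the streams 'out'/'in' and the field
      // values are assumptions, not part of the generated API):
      //
      //   DirEntry entry = DirEntry.newBuilder()
      //       .setParent(16385L)        // hypothetical parent inode id
      //       .addChildren(16386L)      // a non-reference child inode id
      //       .addRefChildren(0)        // a reference node id
      //       .build();
      //   entry.writeDelimitedTo(out);  // out: any java.io.OutputStream
      //   DirEntry roundTrip = DirEntry.parseDelimitedFrom(in);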
13315
13316      public static Builder newBuilder() { return Builder.create(); }
13317      public Builder newBuilderForType() { return newBuilder(); }
13318      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry prototype) {
13319        return newBuilder().mergeFrom(prototype);
13320      }
13321      public Builder toBuilder() { return newBuilder(this); }
13322
13323      @java.lang.Override
13324      protected Builder newBuilderForType(
13325          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13326        Builder builder = new Builder(parent);
13327        return builder;
13328      }
13329      /**
13330       * Protobuf type {@code hadoop.hdfs.fsimage.INodeDirectorySection.DirEntry}
13331       *
13332       * <pre>
13334       * A single DirEntry needs to fit in the default PB max message size of
13335       * 64MB. Please be careful when adding more fields to a DirEntry!
13336       * </pre>
13337       */
13338      public static final class Builder extends
13339          com.google.protobuf.GeneratedMessage.Builder<Builder>
13340         implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntryOrBuilder {
13341        public static final com.google.protobuf.Descriptors.Descriptor
13342            getDescriptor() {
13343          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_descriptor;
13344        }
13345
13346        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
13347            internalGetFieldAccessorTable() {
13348          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_fieldAccessorTable
13349              .ensureFieldAccessorsInitialized(
13350                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry.Builder.class);
13351        }
13352
13353        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry.newBuilder()
13354        private Builder() {
13355          maybeForceBuilderInitialization();
13356        }
13357
13358        private Builder(
13359            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13360          super(parent);
13361          maybeForceBuilderInitialization();
13362        }
13363        private void maybeForceBuilderInitialization() {
13364          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
13365          }
13366        }
13367        private static Builder create() {
13368          return new Builder();
13369        }
13370
13371        public Builder clear() {
13372          super.clear();
13373          parent_ = 0L;
13374          bitField0_ = (bitField0_ & ~0x00000001);
13375          children_ = java.util.Collections.emptyList();
13376          bitField0_ = (bitField0_ & ~0x00000002);
13377          refChildren_ = java.util.Collections.emptyList();
13378          bitField0_ = (bitField0_ & ~0x00000004);
13379          return this;
13380        }
13381
13382        public Builder clone() {
13383          return create().mergeFrom(buildPartial());
13384        }
13385
13386        public com.google.protobuf.Descriptors.Descriptor
13387            getDescriptorForType() {
13388          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_descriptor;
13389        }
13390
13391        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry getDefaultInstanceForType() {
13392          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry.getDefaultInstance();
13393        }
13394
13395        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry build() {
13396          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry result = buildPartial();
13397          if (!result.isInitialized()) {
13398            throw newUninitializedMessageException(result);
13399          }
13400          return result;
13401        }
13402
13403        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry buildPartial() {
13404          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry(this);
13405          int from_bitField0_ = bitField0_;
13406          int to_bitField0_ = 0;
13407          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
13408            to_bitField0_ |= 0x00000001;
13409          }
13410          result.parent_ = parent_;
13411          if (((bitField0_ & 0x00000002) == 0x00000002)) {
13412            children_ = java.util.Collections.unmodifiableList(children_);
13413            bitField0_ = (bitField0_ & ~0x00000002);
13414          }
13415          result.children_ = children_;
13416          if (((bitField0_ & 0x00000004) == 0x00000004)) {
13417            refChildren_ = java.util.Collections.unmodifiableList(refChildren_);
13418            bitField0_ = (bitField0_ & ~0x00000004);
13419          }
13420          result.refChildren_ = refChildren_;
13421          result.bitField0_ = to_bitField0_;
13422          onBuilt();
13423          return result;
13424        }
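        // Ownership hand-off: the builder's lists are wrapped unmodifiable
        // and given to the message, and the mutable bits are cleared so a
        // later mutation goes through ensure*IsMutable() and copies the list
        // rather than altering the already-built message.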
13425
13426        public Builder mergeFrom(com.google.protobuf.Message other) {
13427          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry) {
13428            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry)other);
13429          } else {
13430            super.mergeFrom(other);
13431            return this;
13432          }
13433        }
13434
13435        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry other) {
13436          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry.getDefaultInstance()) return this;
13437          if (other.hasParent()) {
13438            setParent(other.getParent());
13439          }
13440          if (!other.children_.isEmpty()) {
13441            if (children_.isEmpty()) {
13442              children_ = other.children_;
13443              bitField0_ = (bitField0_ & ~0x00000002);
13444            } else {
13445              ensureChildrenIsMutable();
13446              children_.addAll(other.children_);
13447            }
13448            onChanged();
13449          }
13450          if (!other.refChildren_.isEmpty()) {
13451            if (refChildren_.isEmpty()) {
13452              refChildren_ = other.refChildren_;
13453              bitField0_ = (bitField0_ & ~0x00000004);
13454            } else {
13455              ensureRefChildrenIsMutable();
13456              refChildren_.addAll(other.refChildren_);
13457            }
13458            onChanged();
13459          }
13460          this.mergeUnknownFields(other.getUnknownFields());
13461          return this;
13462        }
13463
13464        public final boolean isInitialized() {
13465          return true;
13466        }
13467
13468        public Builder mergeFrom(
13469            com.google.protobuf.CodedInputStream input,
13470            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13471            throws java.io.IOException {
13472          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parsedMessage = null;
13473          try {
13474            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
13475          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
13476            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry) e.getUnfinishedMessage();
13477            throw e;
13478          } finally {
13479            if (parsedMessage != null) {
13480              mergeFrom(parsedMessage);
13481            }
13482          }
13483          return this;
13484        }
13485        private int bitField0_;
13486
13487        // optional uint64 parent = 1;
13488        private long parent_ ;
13489        /**
13490         * <code>optional uint64 parent = 1;</code>
13491         */
13492        public boolean hasParent() {
13493          return ((bitField0_ & 0x00000001) == 0x00000001);
13494        }
13495        /**
13496         * <code>optional uint64 parent = 1;</code>
13497         */
13498        public long getParent() {
13499          return parent_;
13500        }
13501        /**
13502         * <code>optional uint64 parent = 1;</code>
13503         */
13504        public Builder setParent(long value) {
13505          bitField0_ |= 0x00000001;
13506          parent_ = value;
13507          onChanged();
13508          return this;
13509        }
13510        /**
13511         * <code>optional uint64 parent = 1;</code>
13512         */
13513        public Builder clearParent() {
13514          bitField0_ = (bitField0_ & ~0x00000001);
13515          parent_ = 0L;
13516          onChanged();
13517          return this;
13518        }
13519
13520        // repeated uint64 children = 2 [packed = true];
13521        private java.util.List<java.lang.Long> children_ = java.util.Collections.emptyList();
13522        private void ensureChildrenIsMutable() {
13523          if (!((bitField0_ & 0x00000002) == 0x00000002)) {
13524            children_ = new java.util.ArrayList<java.lang.Long>(children_);
13525            bitField0_ |= 0x00000002;
13526          }
13527        }
13528        /**
13529         * <code>repeated uint64 children = 2 [packed = true];</code>
13530         *
13531         * <pre>
13532         * children that are not reference nodes
13533         * </pre>
13534         */
13535        public java.util.List<java.lang.Long>
13536            getChildrenList() {
13537          return java.util.Collections.unmodifiableList(children_);
13538        }
13539        /**
13540         * <code>repeated uint64 children = 2 [packed = true];</code>
13541         *
13542         * <pre>
13543         * children that are not reference nodes
13544         * </pre>
13545         */
13546        public int getChildrenCount() {
13547          return children_.size();
13548        }
13549        /**
13550         * <code>repeated uint64 children = 2 [packed = true];</code>
13551         *
13552         * <pre>
13553         * children that are not reference nodes
13554         * </pre>
13555         */
13556        public long getChildren(int index) {
13557          return children_.get(index);
13558        }
13559        /**
13560         * <code>repeated uint64 children = 2 [packed = true];</code>
13561         *
13562         * <pre>
13563         * children that are not reference nodes
13564         * </pre>
13565         */
13566        public Builder setChildren(
13567            int index, long value) {
13568          ensureChildrenIsMutable();
13569          children_.set(index, value);
13570          onChanged();
13571          return this;
13572        }
13573        /**
13574         * <code>repeated uint64 children = 2 [packed = true];</code>
13575         *
13576         * <pre>
13577         * children that are not reference nodes
13578         * </pre>
13579         */
13580        public Builder addChildren(long value) {
13581          ensureChildrenIsMutable();
13582          children_.add(value);
13583          onChanged();
13584          return this;
13585        }
13586        /**
13587         * <code>repeated uint64 children = 2 [packed = true];</code>
13588         *
13589         * <pre>
13590         * children that are not reference nodes
13591         * </pre>
13592         */
13593        public Builder addAllChildren(
13594            java.lang.Iterable<? extends java.lang.Long> values) {
13595          ensureChildrenIsMutable();
13596          super.addAll(values, children_);
13597          onChanged();
13598          return this;
13599        }
13600        /**
13601         * <code>repeated uint64 children = 2 [packed = true];</code>
13602         *
13603         * <pre>
13604         * children that are not reference nodes
13605         * </pre>
13606         */
13607        public Builder clearChildren() {
13608          children_ = java.util.Collections.emptyList();
13609          bitField0_ = (bitField0_ & ~0x00000002);
13610          onChanged();
13611          return this;
13612        }
13613
13614        // repeated uint32 refChildren = 3 [packed = true];
13615        private java.util.List<java.lang.Integer> refChildren_ = java.util.Collections.emptyList();
13616        private void ensureRefChildrenIsMutable() {
13617          if (!((bitField0_ & 0x00000004) == 0x00000004)) {
13618            refChildren_ = new java.util.ArrayList<java.lang.Integer>(refChildren_);
13619            bitField0_ |= 0x00000004;
13620          }
13621        }
13622        /**
13623         * <code>repeated uint32 refChildren = 3 [packed = true];</code>
13624         *
13625         * <pre>
13626         * children that are reference nodes; each element is a reference node id
13627         * </pre>
13628         */
13629        public java.util.List<java.lang.Integer>
13630            getRefChildrenList() {
13631          return java.util.Collections.unmodifiableList(refChildren_);
13632        }
13633        /**
13634         * <code>repeated uint32 refChildren = 3 [packed = true];</code>
13635         *
13636         * <pre>
13637         * children that are reference nodes; each element is a reference node id
13638         * </pre>
13639         */
13640        public int getRefChildrenCount() {
13641          return refChildren_.size();
13642        }
13643        /**
13644         * <code>repeated uint32 refChildren = 3 [packed = true];</code>
13645         *
13646         * <pre>
13647         * children that are reference nodes; each element is a reference node id
13648         * </pre>
13649         */
13650        public int getRefChildren(int index) {
13651          return refChildren_.get(index);
13652        }
13653        /**
13654         * <code>repeated uint32 refChildren = 3 [packed = true];</code>
13655         *
13656         * <pre>
13657         * children that are reference nodes; each element is a reference node id
13658         * </pre>
13659         */
13660        public Builder setRefChildren(
13661            int index, int value) {
13662          ensureRefChildrenIsMutable();
13663          refChildren_.set(index, value);
13664          onChanged();
13665          return this;
13666        }
13667        /**
13668         * <code>repeated uint32 refChildren = 3 [packed = true];</code>
13669         *
13670         * <pre>
13671         * children that are reference nodes; each element is a reference node id
13672         * </pre>
13673         */
13674        public Builder addRefChildren(int value) {
13675          ensureRefChildrenIsMutable();
13676          refChildren_.add(value);
13677          onChanged();
13678          return this;
13679        }
13680        /**
13681         * <code>repeated uint32 refChildren = 3 [packed = true];</code>
13682         *
13683         * <pre>
13684         * children that are reference nodes; each element is a reference node id
13685         * </pre>
13686         */
13687        public Builder addAllRefChildren(
13688            java.lang.Iterable<? extends java.lang.Integer> values) {
13689          ensureRefChildrenIsMutable();
13690          super.addAll(values, refChildren_);
13691          onChanged();
13692          return this;
13693        }
13694        /**
13695         * <code>repeated uint32 refChildren = 3 [packed = true];</code>
13696         *
13697         * <pre>
13698         * children that are reference nodes; each element is a reference node id
13699         * </pre>
13700         */
13701        public Builder clearRefChildren() {
13702          refChildren_ = java.util.Collections.emptyList();
13703          bitField0_ = (bitField0_ & ~0x00000004);
13704          onChanged();
13705          return this;
13706        }
13707
13708        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeDirectorySection.DirEntry)
13709      }
13710
13711      static {
13712        defaultInstance = new DirEntry(true);
13713        defaultInstance.initFields();
13714      }
13715
13716      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeDirectorySection.DirEntry)
13717    }
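    // Reading sketch (this assumes the fsimage convention of writing each
    // DirEntry as a length-delimited record inside the INODE_DIR section;
    // that convention is not expressed in this generated code):
    //
    //   DirEntry e;
    //   while ((e = DirEntry.parseDelimitedFrom(in)) != null) {
    //     long parent = e.hasParent() ? e.getParent() : 0L;
    //     // walk e.getChildrenList() and e.getRefChildrenList() here
    //   }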
13718
13719    private void initFields() {
13720    }
13721    private byte memoizedIsInitialized = -1;
13722    public final boolean isInitialized() {
13723      byte isInitialized = memoizedIsInitialized;
13724      if (isInitialized != -1) return isInitialized == 1;
13725
13726      memoizedIsInitialized = 1;
13727      return true;
13728    }
13729
13730    public void writeTo(com.google.protobuf.CodedOutputStream output)
13731                        throws java.io.IOException {
13732      getSerializedSize();
13733      getUnknownFields().writeTo(output);
13734    }
13735
13736    private int memoizedSerializedSize = -1;
13737    public int getSerializedSize() {
13738      int size = memoizedSerializedSize;
13739      if (size != -1) return size;
13740
13741      size = 0;
13742      size += getUnknownFields().getSerializedSize();
13743      memoizedSerializedSize = size;
13744      return size;
13745    }
13746
13747    private static final long serialVersionUID = 0L;
13748    @java.lang.Override
13749    protected java.lang.Object writeReplace()
13750        throws java.io.ObjectStreamException {
13751      return super.writeReplace();
13752    }
13753
13754    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom(
13755        com.google.protobuf.ByteString data)
13756        throws com.google.protobuf.InvalidProtocolBufferException {
13757      return PARSER.parseFrom(data);
13758    }
13759    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom(
13760        com.google.protobuf.ByteString data,
13761        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13762        throws com.google.protobuf.InvalidProtocolBufferException {
13763      return PARSER.parseFrom(data, extensionRegistry);
13764    }
13765    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom(byte[] data)
13766        throws com.google.protobuf.InvalidProtocolBufferException {
13767      return PARSER.parseFrom(data);
13768    }
13769    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom(
13770        byte[] data,
13771        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13772        throws com.google.protobuf.InvalidProtocolBufferException {
13773      return PARSER.parseFrom(data, extensionRegistry);
13774    }
13775    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom(java.io.InputStream input)
13776        throws java.io.IOException {
13777      return PARSER.parseFrom(input);
13778    }
13779    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom(
13780        java.io.InputStream input,
13781        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13782        throws java.io.IOException {
13783      return PARSER.parseFrom(input, extensionRegistry);
13784    }
13785    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseDelimitedFrom(java.io.InputStream input)
13786        throws java.io.IOException {
13787      return PARSER.parseDelimitedFrom(input);
13788    }
13789    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseDelimitedFrom(
13790        java.io.InputStream input,
13791        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13792        throws java.io.IOException {
13793      return PARSER.parseDelimitedFrom(input, extensionRegistry);
13794    }
13795    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom(
13796        com.google.protobuf.CodedInputStream input)
13797        throws java.io.IOException {
13798      return PARSER.parseFrom(input);
13799    }
13800    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom(
13801        com.google.protobuf.CodedInputStream input,
13802        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13803        throws java.io.IOException {
13804      return PARSER.parseFrom(input, extensionRegistry);
13805    }
13806
13807    public static Builder newBuilder() { return Builder.create(); }
13808    public Builder newBuilderForType() { return newBuilder(); }
13809    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection prototype) {
13810      return newBuilder().mergeFrom(prototype);
13811    }
13812    public Builder toBuilder() { return newBuilder(this); }
13813
13814    @java.lang.Override
13815    protected Builder newBuilderForType(
13816        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13817      Builder builder = new Builder(parent);
13818      return builder;
13819    }
13820    /**
13821     * Protobuf type {@code hadoop.hdfs.fsimage.INodeDirectorySection}
13822     *
13823     * <pre>
13825     * This section records the children of each directory
13826     * NAME: INODE_DIR
13827     * </pre>
13828     */
13829    public static final class Builder extends
13830        com.google.protobuf.GeneratedMessage.Builder<Builder>
13831       implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySectionOrBuilder {
13832      public static final com.google.protobuf.Descriptors.Descriptor
13833          getDescriptor() {
13834        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_descriptor;
13835      }
13836
13837      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
13838          internalGetFieldAccessorTable() {
13839        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_fieldAccessorTable
13840            .ensureFieldAccessorsInitialized(
13841                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.Builder.class);
13842      }
13843
13844      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.newBuilder()
13845      private Builder() {
13846        maybeForceBuilderInitialization();
13847      }
13848
13849      private Builder(
13850          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13851        super(parent);
13852        maybeForceBuilderInitialization();
13853      }
13854      private void maybeForceBuilderInitialization() {
13855        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
13856        }
13857      }
13858      private static Builder create() {
13859        return new Builder();
13860      }
13861
13862      public Builder clear() {
13863        super.clear();
13864        return this;
13865      }
13866
13867      public Builder clone() {
13868        return create().mergeFrom(buildPartial());
13869      }
13870
13871      public com.google.protobuf.Descriptors.Descriptor
13872          getDescriptorForType() {
13873        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_descriptor;
13874      }
13875
13876      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection getDefaultInstanceForType() {
13877        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.getDefaultInstance();
13878      }
13879
13880      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection build() {
13881        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection result = buildPartial();
13882        if (!result.isInitialized()) {
13883          throw newUninitializedMessageException(result);
13884        }
13885        return result;
13886      }
13887
13888      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection buildPartial() {
13889        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection(this);
13890        onBuilt();
13891        return result;
13892      }
13893
13894      public Builder mergeFrom(com.google.protobuf.Message other) {
13895        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection) {
13896          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection)other);
13897        } else {
13898          super.mergeFrom(other);
13899          return this;
13900        }
13901      }
13902
13903      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection other) {
13904        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.getDefaultInstance()) return this;
13905        this.mergeUnknownFields(other.getUnknownFields());
13906        return this;
13907      }
13908
13909      public final boolean isInitialized() {
13910        return true;
13911      }
13912
13913      public Builder mergeFrom(
13914          com.google.protobuf.CodedInputStream input,
13915          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13916          throws java.io.IOException {
13917        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parsedMessage = null;
13918        try {
13919          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
13920        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
13921          parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection) e.getUnfinishedMessage();
13922          throw e;
13923        } finally {
13924          if (parsedMessage != null) {
13925            mergeFrom(parsedMessage);
13926          }
13927        }
13928        return this;
13929      }
13930
13931      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeDirectorySection)
13932    }
13933
13934    static {
13935      defaultInstance = new INodeDirectorySection(true);
13936      defaultInstance.initFields();
13937    }
13938
13939    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeDirectorySection)
13940  }
13941
13942  public interface INodeReferenceSectionOrBuilder
13943      extends com.google.protobuf.MessageOrBuilder {
13944  }
13945  /**
13946   * Protobuf type {@code hadoop.hdfs.fsimage.INodeReferenceSection}
13947   */
13948  public static final class INodeReferenceSection extends
13949      com.google.protobuf.GeneratedMessage
13950      implements INodeReferenceSectionOrBuilder {
13951    // Use INodeReferenceSection.newBuilder() to construct.
13952    private INodeReferenceSection(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
13953      super(builder);
13954      this.unknownFields = builder.getUnknownFields();
13955    }
13956    private INodeReferenceSection(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
13957
13958    private static final INodeReferenceSection defaultInstance;
13959    public static INodeReferenceSection getDefaultInstance() {
13960      return defaultInstance;
13961    }
13962
13963    public INodeReferenceSection getDefaultInstanceForType() {
13964      return defaultInstance;
13965    }
13966
13967    private final com.google.protobuf.UnknownFieldSet unknownFields;
13968    @java.lang.Override
13969    public final com.google.protobuf.UnknownFieldSet
13970        getUnknownFields() {
13971      return this.unknownFields;
13972    }
13973    private INodeReferenceSection(
13974        com.google.protobuf.CodedInputStream input,
13975        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13976        throws com.google.protobuf.InvalidProtocolBufferException {
13977      initFields();
13978      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
13979          com.google.protobuf.UnknownFieldSet.newBuilder();
13980      try {
13981        boolean done = false;
13982        while (!done) {
13983          int tag = input.readTag();
13984          switch (tag) {
13985            case 0:
13986              done = true;
13987              break;
13988            default: {
13989              if (!parseUnknownField(input, unknownFields,
13990                                     extensionRegistry, tag)) {
13991                done = true;
13992              }
13993              break;
13994            }
13995          }
13996        }
13997      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
13998        throw e.setUnfinishedMessage(this);
13999      } catch (java.io.IOException e) {
14000        throw new com.google.protobuf.InvalidProtocolBufferException(
14001            e.getMessage()).setUnfinishedMessage(this);
14002      } finally {
14003        this.unknownFields = unknownFields.build();
14004        makeExtensionsImmutable();
14005      }
14006    }
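    // INodeReferenceSection declares no fields of its own, so this
    // constructor only drains unrecognized tags into unknownFields; the
    // INodeReference records themselves are parsed separately (by the image
    // loader, assuming the same delimited-record convention as above).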
14007    public static final com.google.protobuf.Descriptors.Descriptor
14008        getDescriptor() {
14009      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_descriptor;
14010    }
14011
14012    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
14013        internalGetFieldAccessorTable() {
14014      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_fieldAccessorTable
14015          .ensureFieldAccessorsInitialized(
14016              org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.Builder.class);
14017    }
14018
14019    public static com.google.protobuf.Parser<INodeReferenceSection> PARSER =
14020        new com.google.protobuf.AbstractParser<INodeReferenceSection>() {
14021      public INodeReferenceSection parsePartialFrom(
14022          com.google.protobuf.CodedInputStream input,
14023          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14024          throws com.google.protobuf.InvalidProtocolBufferException {
14025        return new INodeReferenceSection(input, extensionRegistry);
14026      }
14027    };
14028
14029    @java.lang.Override
14030    public com.google.protobuf.Parser<INodeReferenceSection> getParserForType() {
14031      return PARSER;
14032    }
14033
14034    public interface INodeReferenceOrBuilder
14035        extends com.google.protobuf.MessageOrBuilder {
14036
14037      // optional uint64 referredId = 1;
14038      /**
14039       * <code>optional uint64 referredId = 1;</code>
14040       *
14041       * <pre>
14042       * id of the referred inode
14043       * </pre>
14044       */
14045      boolean hasReferredId();
14046      /**
14047       * <code>optional uint64 referredId = 1;</code>
14048       *
14049       * <pre>
14050       * id of the referred inode
14051       * </pre>
14052       */
14053      long getReferredId();
14054
14055      // optional bytes name = 2;
14056      /**
14057       * <code>optional bytes name = 2;</code>
14058       *
14059       * <pre>
14060       * local name recorded in WithName
14061       * </pre>
14062       */
14063      boolean hasName();
14064      /**
14065       * <code>optional bytes name = 2;</code>
14066       *
14067       * <pre>
14068       * local name recorded in WithName
14069       * </pre>
14070       */
14071      com.google.protobuf.ByteString getName();
14072
14073      // optional uint32 dstSnapshotId = 3;
14074      /**
14075       * <code>optional uint32 dstSnapshotId = 3;</code>
14076       *
14077       * <pre>
14078       * recorded in DstReference
14079       * </pre>
14080       */
14081      boolean hasDstSnapshotId();
14082      /**
14083       * <code>optional uint32 dstSnapshotId = 3;</code>
14084       *
14085       * <pre>
14086       * recorded in DstReference
14087       * </pre>
14088       */
14089      int getDstSnapshotId();
14090
14091      // optional uint32 lastSnapshotId = 4;
14092      /**
14093       * <code>optional uint32 lastSnapshotId = 4;</code>
14094       *
14095       * <pre>
14096       * recorded in WithName
14097       * </pre>
14098       */
14099      boolean hasLastSnapshotId();
14100      /**
14101       * <code>optional uint32 lastSnapshotId = 4;</code>
14102       *
14103       * <pre>
14104       * recorded in WithName
14105       * </pre>
14106       */
14107      int getLastSnapshotId();
14108    }
14109    /**
14110     * Protobuf type {@code hadoop.hdfs.fsimage.INodeReferenceSection.INodeReference}
14111     */
14112    public static final class INodeReference extends
14113        com.google.protobuf.GeneratedMessage
14114        implements INodeReferenceOrBuilder {
14115      // Use INodeReference.newBuilder() to construct.
14116      private INodeReference(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
14117        super(builder);
14118        this.unknownFields = builder.getUnknownFields();
14119      }
14120      private INodeReference(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
14121
14122      private static final INodeReference defaultInstance;
14123      public static INodeReference getDefaultInstance() {
14124        return defaultInstance;
14125      }
14126
14127      public INodeReference getDefaultInstanceForType() {
14128        return defaultInstance;
14129      }
14130
14131      private final com.google.protobuf.UnknownFieldSet unknownFields;
14132      @java.lang.Override
14133      public final com.google.protobuf.UnknownFieldSet
14134          getUnknownFields() {
14135        return this.unknownFields;
14136      }
14137      private INodeReference(
14138          com.google.protobuf.CodedInputStream input,
14139          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14140          throws com.google.protobuf.InvalidProtocolBufferException {
14141        initFields();
14142        int mutable_bitField0_ = 0;
14143        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
14144            com.google.protobuf.UnknownFieldSet.newBuilder();
14145        try {
14146          boolean done = false;
14147          while (!done) {
14148            int tag = input.readTag();
14149            switch (tag) {
14150              case 0:
14151                done = true;
14152                break;
14153              default: {
14154                if (!parseUnknownField(input, unknownFields,
14155                                       extensionRegistry, tag)) {
14156                  done = true;
14157                }
14158                break;
14159              }
14160              case 8: {
14161                bitField0_ |= 0x00000001;
14162                referredId_ = input.readUInt64();
14163                break;
14164              }
14165              case 18: {
14166                bitField0_ |= 0x00000002;
14167                name_ = input.readBytes();
14168                break;
14169              }
14170              case 24: {
14171                bitField0_ |= 0x00000004;
14172                dstSnapshotId_ = input.readUInt32();
14173                break;
14174              }
14175              case 32: {
14176                bitField0_ |= 0x00000008;
14177                lastSnapshotId_ = input.readUInt32();
14178                break;
14179              }
14180            }
14181          }
14182        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
14183          throw e.setUnfinishedMessage(this);
14184        } catch (java.io.IOException e) {
14185          throw new com.google.protobuf.InvalidProtocolBufferException(
14186              e.getMessage()).setUnfinishedMessage(this);
14187        } finally {
14188          this.unknownFields = unknownFields.build();
14189          makeExtensionsImmutable();
14190        }
14191      }
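      // Note on the cases above: each tag is (fieldNumber << 3) | wireType.
      // 8, 24 and 32 are varint keys (wire type 0) for referredId,
      // dstSnapshotId and lastSnapshotId; 18 is the length-delimited key
      // (wire type 2) for the bytes field 'name'.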
14192      public static final com.google.protobuf.Descriptors.Descriptor
14193          getDescriptor() {
14194        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_descriptor;
14195      }
14196
14197      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
14198          internalGetFieldAccessorTable() {
14199        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_fieldAccessorTable
14200            .ensureFieldAccessorsInitialized(
14201                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference.Builder.class);
14202      }
14203
14204      public static com.google.protobuf.Parser<INodeReference> PARSER =
14205          new com.google.protobuf.AbstractParser<INodeReference>() {
14206        public INodeReference parsePartialFrom(
14207            com.google.protobuf.CodedInputStream input,
14208            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14209            throws com.google.protobuf.InvalidProtocolBufferException {
14210          return new INodeReference(input, extensionRegistry);
14211        }
14212      };
14213
14214      @java.lang.Override
14215      public com.google.protobuf.Parser<INodeReference> getParserForType() {
14216        return PARSER;
14217      }
14218
14219      private int bitField0_;
14220      // optional uint64 referredId = 1;
14221      public static final int REFERREDID_FIELD_NUMBER = 1;
14222      private long referredId_;
14223      /**
14224       * <code>optional uint64 referredId = 1;</code>
14225       *
14226       * <pre>
14227       * id of the referred inode
14228       * </pre>
14229       */
14230      public boolean hasReferredId() {
14231        return ((bitField0_ & 0x00000001) == 0x00000001);
14232      }
14233      /**
14234       * <code>optional uint64 referredId = 1;</code>
14235       *
14236       * <pre>
14237       * id of the referred inode
14238       * </pre>
14239       */
14240      public long getReferredId() {
14241        return referredId_;
14242      }
14243
14244      // optional bytes name = 2;
14245      public static final int NAME_FIELD_NUMBER = 2;
14246      private com.google.protobuf.ByteString name_;
14247      /**
14248       * <code>optional bytes name = 2;</code>
14249       *
14250       * <pre>
14251       * local name recorded in WithName
14252       * </pre>
14253       */
14254      public boolean hasName() {
14255        return ((bitField0_ & 0x00000002) == 0x00000002);
14256      }
14257      /**
14258       * <code>optional bytes name = 2;</code>
14259       *
14260       * <pre>
14261       * local name recorded in WithName
14262       * </pre>
14263       */
14264      public com.google.protobuf.ByteString getName() {
14265        return name_;
14266      }
14267
14268      // optional uint32 dstSnapshotId = 3;
14269      public static final int DSTSNAPSHOTID_FIELD_NUMBER = 3;
14270      private int dstSnapshotId_;
14271      /**
14272       * <code>optional uint32 dstSnapshotId = 3;</code>
14273       *
14274       * <pre>
14275       * recorded in DstReference
14276       * </pre>
14277       */
14278      public boolean hasDstSnapshotId() {
14279        return ((bitField0_ & 0x00000004) == 0x00000004);
14280      }
14281      /**
14282       * <code>optional uint32 dstSnapshotId = 3;</code>
14283       *
14284       * <pre>
14285       * recorded in DstReference
14286       * </pre>
14287       */
14288      public int getDstSnapshotId() {
14289        return dstSnapshotId_;
14290      }
14291
14292      // optional uint32 lastSnapshotId = 4;
14293      public static final int LASTSNAPSHOTID_FIELD_NUMBER = 4;
14294      private int lastSnapshotId_;
14295      /**
14296       * <code>optional uint32 lastSnapshotId = 4;</code>
14297       *
14298       * <pre>
14299       * recorded in WithName
14300       * </pre>
14301       */
14302      public boolean hasLastSnapshotId() {
14303        return ((bitField0_ & 0x00000008) == 0x00000008);
14304      }
14305      /**
14306       * <code>optional uint32 lastSnapshotId = 4;</code>
14307       *
14308       * <pre>
14309       * recorded in WithName
14310       * </pre>
14311       */
14312      public int getLastSnapshotId() {
14313        return lastSnapshotId_;
14314      }
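      // Usage sketch (illustrative; 'in' is an assumed java.io.InputStream):
      // every field is optional, so the has* accessors guard the defaults.
      //
      //   INodeReference ref = INodeReference.parseDelimitedFrom(in);
      //   if (ref != null && ref.hasReferredId()) {
      //     long referred = ref.getReferredId();
      //     String localName =
      //         ref.hasName() ? ref.getName().toStringUtf8() : "";
      //   }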
14315
14316      private void initFields() {
14317        referredId_ = 0L;
14318        name_ = com.google.protobuf.ByteString.EMPTY;
14319        dstSnapshotId_ = 0;
14320        lastSnapshotId_ = 0;
14321      }
14322      private byte memoizedIsInitialized = -1;
14323      public final boolean isInitialized() {
14324        byte isInitialized = memoizedIsInitialized;
14325        if (isInitialized != -1) return isInitialized == 1;
14326
14327        memoizedIsInitialized = 1;
14328        return true;
14329      }
14330
14331      public void writeTo(com.google.protobuf.CodedOutputStream output)
14332                          throws java.io.IOException {
14333        getSerializedSize();
14334        if (((bitField0_ & 0x00000001) == 0x00000001)) {
14335          output.writeUInt64(1, referredId_);
14336        }
14337        if (((bitField0_ & 0x00000002) == 0x00000002)) {
14338          output.writeBytes(2, name_);
14339        }
14340        if (((bitField0_ & 0x00000004) == 0x00000004)) {
14341          output.writeUInt32(3, dstSnapshotId_);
14342        }
14343        if (((bitField0_ & 0x00000008) == 0x00000008)) {
14344          output.writeUInt32(4, lastSnapshotId_);
14345        }
14346        getUnknownFields().writeTo(output);
14347      }
14348
14349      private int memoizedSerializedSize = -1;
14350      public int getSerializedSize() {
14351        int size = memoizedSerializedSize;
14352        if (size != -1) return size;
14353
14354        size = 0;
14355        if (((bitField0_ & 0x00000001) == 0x00000001)) {
14356          size += com.google.protobuf.CodedOutputStream
14357            .computeUInt64Size(1, referredId_);
14358        }
14359        if (((bitField0_ & 0x00000002) == 0x00000002)) {
14360          size += com.google.protobuf.CodedOutputStream
14361            .computeBytesSize(2, name_);
14362        }
14363        if (((bitField0_ & 0x00000004) == 0x00000004)) {
14364          size += com.google.protobuf.CodedOutputStream
14365            .computeUInt32Size(3, dstSnapshotId_);
14366        }
14367        if (((bitField0_ & 0x00000008) == 0x00000008)) {
14368          size += com.google.protobuf.CodedOutputStream
14369            .computeUInt32Size(4, lastSnapshotId_);
14370        }
14371        size += getUnknownFields().getSerializedSize();
14372        memoizedSerializedSize = size;
14373        return size;
14374      }
14375
14376      private static final long serialVersionUID = 0L;
14377      @java.lang.Override
14378      protected java.lang.Object writeReplace()
14379          throws java.io.ObjectStreamException {
14380        return super.writeReplace();
14381      }
14382
14383      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom(
14384          com.google.protobuf.ByteString data)
14385          throws com.google.protobuf.InvalidProtocolBufferException {
14386        return PARSER.parseFrom(data);
14387      }
14388      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom(
14389          com.google.protobuf.ByteString data,
14390          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14391          throws com.google.protobuf.InvalidProtocolBufferException {
14392        return PARSER.parseFrom(data, extensionRegistry);
14393      }
14394      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom(byte[] data)
14395          throws com.google.protobuf.InvalidProtocolBufferException {
14396        return PARSER.parseFrom(data);
14397      }
14398      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom(
14399          byte[] data,
14400          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14401          throws com.google.protobuf.InvalidProtocolBufferException {
14402        return PARSER.parseFrom(data, extensionRegistry);
14403      }
14404      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom(java.io.InputStream input)
14405          throws java.io.IOException {
14406        return PARSER.parseFrom(input);
14407      }
14408      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom(
14409          java.io.InputStream input,
14410          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14411          throws java.io.IOException {
14412        return PARSER.parseFrom(input, extensionRegistry);
14413      }
14414      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseDelimitedFrom(java.io.InputStream input)
14415          throws java.io.IOException {
14416        return PARSER.parseDelimitedFrom(input);
14417      }
14418      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseDelimitedFrom(
14419          java.io.InputStream input,
14420          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14421          throws java.io.IOException {
14422        return PARSER.parseDelimitedFrom(input, extensionRegistry);
14423      }
14424      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom(
14425          com.google.protobuf.CodedInputStream input)
14426          throws java.io.IOException {
14427        return PARSER.parseFrom(input);
14428      }
14429      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom(
14430          com.google.protobuf.CodedInputStream input,
14431          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14432          throws java.io.IOException {
14433        return PARSER.parseFrom(input, extensionRegistry);
14434      }
14435
14436      public static Builder newBuilder() { return Builder.create(); }
14437      public Builder newBuilderForType() { return newBuilder(); }
14438      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference prototype) {
14439        return newBuilder().mergeFrom(prototype);
14440      }
14441      public Builder toBuilder() { return newBuilder(this); }
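
      // Round-trip sketch (editorial; the field value is illustrative): copy
      // an existing message, change one field, and rebuild immutably.
      //
      //   INodeReferenceSection.INodeReference updated =
      //       ref.toBuilder().setLastSnapshotId(42).build();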
14442
14443      @java.lang.Override
14444      protected Builder newBuilderForType(
14445          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
14446        Builder builder = new Builder(parent);
14447        return builder;
14448      }
14449      /**
14450       * Protobuf type {@code hadoop.hdfs.fsimage.INodeReferenceSection.INodeReference}
14451       */
14452      public static final class Builder extends
14453          com.google.protobuf.GeneratedMessage.Builder<Builder>
14454         implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReferenceOrBuilder {
14455        public static final com.google.protobuf.Descriptors.Descriptor
14456            getDescriptor() {
14457          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_descriptor;
14458        }
14459
14460        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
14461            internalGetFieldAccessorTable() {
14462          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_fieldAccessorTable
14463              .ensureFieldAccessorsInitialized(
14464                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference.Builder.class);
14465        }
14466
14467        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference.newBuilder()
14468        private Builder() {
14469          maybeForceBuilderInitialization();
14470        }
14471
14472        private Builder(
14473            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
14474          super(parent);
14475          maybeForceBuilderInitialization();
14476        }
14477        private void maybeForceBuilderInitialization() {
14478          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
14479          }
14480        }
14481        private static Builder create() {
14482          return new Builder();
14483        }
14484
14485        public Builder clear() {
14486          super.clear();
14487          referredId_ = 0L;
14488          bitField0_ = (bitField0_ & ~0x00000001);
14489          name_ = com.google.protobuf.ByteString.EMPTY;
14490          bitField0_ = (bitField0_ & ~0x00000002);
14491          dstSnapshotId_ = 0;
14492          bitField0_ = (bitField0_ & ~0x00000004);
14493          lastSnapshotId_ = 0;
14494          bitField0_ = (bitField0_ & ~0x00000008);
14495          return this;
14496        }
14497
14498        public Builder clone() {
14499          return create().mergeFrom(buildPartial());
14500        }
14501
14502        public com.google.protobuf.Descriptors.Descriptor
14503            getDescriptorForType() {
14504          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_descriptor;
14505        }
14506
14507        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference getDefaultInstanceForType() {
14508          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference.getDefaultInstance();
14509        }
14510
14511        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference build() {
14512          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference result = buildPartial();
14513          if (!result.isInitialized()) {
14514            throw newUninitializedMessageException(result);
14515          }
14516          return result;
14517        }
14518
14519        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference buildPartial() {
14520          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference(this);
14521          int from_bitField0_ = bitField0_;
14522          int to_bitField0_ = 0;
14523          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
14524            to_bitField0_ |= 0x00000001;
14525          }
14526          result.referredId_ = referredId_;
14527          if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
14528            to_bitField0_ |= 0x00000002;
14529          }
14530          result.name_ = name_;
14531          if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
14532            to_bitField0_ |= 0x00000004;
14533          }
14534          result.dstSnapshotId_ = dstSnapshotId_;
14535          if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
14536            to_bitField0_ |= 0x00000008;
14537          }
14538          result.lastSnapshotId_ = lastSnapshotId_;
14539          result.bitField0_ = to_bitField0_;
14540          onBuilt();
14541          return result;
14542        }
14543
14544        public Builder mergeFrom(com.google.protobuf.Message other) {
14545          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference) {
14546            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference)other);
14547          } else {
14548            super.mergeFrom(other);
14549            return this;
14550          }
14551        }
14552
14553        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference other) {
14554          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference.getDefaultInstance()) return this;
14555          if (other.hasReferredId()) {
14556            setReferredId(other.getReferredId());
14557          }
14558          if (other.hasName()) {
14559            setName(other.getName());
14560          }
14561          if (other.hasDstSnapshotId()) {
14562            setDstSnapshotId(other.getDstSnapshotId());
14563          }
14564          if (other.hasLastSnapshotId()) {
14565            setLastSnapshotId(other.getLastSnapshotId());
14566          }
14567          this.mergeUnknownFields(other.getUnknownFields());
14568          return this;
14569        }
14570
14571        public final boolean isInitialized() {
14572          return true;
14573        }
14574
14575        public Builder mergeFrom(
14576            com.google.protobuf.CodedInputStream input,
14577            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14578            throws java.io.IOException {
14579          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parsedMessage = null;
14580          try {
14581            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
14582          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
14583            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference) e.getUnfinishedMessage();
14584            throw e;
14585          } finally {
14586            if (parsedMessage != null) {
14587              mergeFrom(parsedMessage);
14588            }
14589          }
14590          return this;
14591        }
14592        private int bitField0_;
14593
14594        // optional uint64 referredId = 1;
        private long referredId_;
14596        /**
14597         * <code>optional uint64 referredId = 1;</code>
14598         *
14599         * <pre>
14600         * id of the referred inode
14601         * </pre>
14602         */
14603        public boolean hasReferredId() {
14604          return ((bitField0_ & 0x00000001) == 0x00000001);
14605        }
14606        /**
14607         * <code>optional uint64 referredId = 1;</code>
14608         *
14609         * <pre>
14610         * id of the referred inode
14611         * </pre>
14612         */
14613        public long getReferredId() {
14614          return referredId_;
14615        }
14616        /**
14617         * <code>optional uint64 referredId = 1;</code>
14618         *
14619         * <pre>
14620         * id of the referred inode
14621         * </pre>
14622         */
14623        public Builder setReferredId(long value) {
14624          bitField0_ |= 0x00000001;
14625          referredId_ = value;
14626          onChanged();
14627          return this;
14628        }
14629        /**
14630         * <code>optional uint64 referredId = 1;</code>
14631         *
14632         * <pre>
14633         * id of the referred inode
14634         * </pre>
14635         */
14636        public Builder clearReferredId() {
14637          bitField0_ = (bitField0_ & ~0x00000001);
14638          referredId_ = 0L;
14639          onChanged();
14640          return this;
14641        }
14642
14643        // optional bytes name = 2;
14644        private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY;
14645        /**
14646         * <code>optional bytes name = 2;</code>
14647         *
14648         * <pre>
14649         * local name recorded in WithName
14650         * </pre>
14651         */
14652        public boolean hasName() {
14653          return ((bitField0_ & 0x00000002) == 0x00000002);
14654        }
14655        /**
14656         * <code>optional bytes name = 2;</code>
14657         *
14658         * <pre>
14659         * local name recorded in WithName
14660         * </pre>
14661         */
14662        public com.google.protobuf.ByteString getName() {
14663          return name_;
14664        }
14665        /**
14666         * <code>optional bytes name = 2;</code>
14667         *
14668         * <pre>
14669         * local name recorded in WithName
14670         * </pre>
14671         */
14672        public Builder setName(com.google.protobuf.ByteString value) {
          if (value == null) {
            throw new NullPointerException();
          }
          bitField0_ |= 0x00000002;
14677          name_ = value;
14678          onChanged();
14679          return this;
14680        }
14681        /**
14682         * <code>optional bytes name = 2;</code>
14683         *
14684         * <pre>
14685         * local name recorded in WithName
14686         * </pre>
14687         */
14688        public Builder clearName() {
14689          bitField0_ = (bitField0_ & ~0x00000002);
14690          name_ = getDefaultInstance().getName();
14691          onChanged();
14692          return this;
14693        }
14694
14695        // optional uint32 dstSnapshotId = 3;
        private int dstSnapshotId_;
14697        /**
14698         * <code>optional uint32 dstSnapshotId = 3;</code>
14699         *
14700         * <pre>
14701         * recorded in DstReference
14702         * </pre>
14703         */
14704        public boolean hasDstSnapshotId() {
14705          return ((bitField0_ & 0x00000004) == 0x00000004);
14706        }
14707        /**
14708         * <code>optional uint32 dstSnapshotId = 3;</code>
14709         *
14710         * <pre>
14711         * recorded in DstReference
14712         * </pre>
14713         */
14714        public int getDstSnapshotId() {
14715          return dstSnapshotId_;
14716        }
14717        /**
14718         * <code>optional uint32 dstSnapshotId = 3;</code>
14719         *
14720         * <pre>
14721         * recorded in DstReference
14722         * </pre>
14723         */
14724        public Builder setDstSnapshotId(int value) {
14725          bitField0_ |= 0x00000004;
14726          dstSnapshotId_ = value;
14727          onChanged();
14728          return this;
14729        }
14730        /**
14731         * <code>optional uint32 dstSnapshotId = 3;</code>
14732         *
14733         * <pre>
14734         * recorded in DstReference
14735         * </pre>
14736         */
14737        public Builder clearDstSnapshotId() {
14738          bitField0_ = (bitField0_ & ~0x00000004);
14739          dstSnapshotId_ = 0;
14740          onChanged();
14741          return this;
14742        }
14743
14744        // optional uint32 lastSnapshotId = 4;
        private int lastSnapshotId_;
14746        /**
14747         * <code>optional uint32 lastSnapshotId = 4;</code>
14748         *
14749         * <pre>
14750         * recorded in WithName
14751         * </pre>
14752         */
14753        public boolean hasLastSnapshotId() {
14754          return ((bitField0_ & 0x00000008) == 0x00000008);
14755        }
14756        /**
14757         * <code>optional uint32 lastSnapshotId = 4;</code>
14758         *
14759         * <pre>
14760         * recorded in WithName
14761         * </pre>
14762         */
14763        public int getLastSnapshotId() {
14764          return lastSnapshotId_;
14765        }
14766        /**
14767         * <code>optional uint32 lastSnapshotId = 4;</code>
14768         *
14769         * <pre>
14770         * recorded in WithName
14771         * </pre>
14772         */
14773        public Builder setLastSnapshotId(int value) {
14774          bitField0_ |= 0x00000008;
14775          lastSnapshotId_ = value;
14776          onChanged();
14777          return this;
14778        }
14779        /**
14780         * <code>optional uint32 lastSnapshotId = 4;</code>
14781         *
14782         * <pre>
14783         * recorded in WithName
14784         * </pre>
14785         */
14786        public Builder clearLastSnapshotId() {
14787          bitField0_ = (bitField0_ & ~0x00000008);
14788          lastSnapshotId_ = 0;
14789          onChanged();
14790          return this;
14791        }
14792
14793        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeReferenceSection.INodeReference)
14794      }
14795
14796      static {
14797        defaultInstance = new INodeReference(true);
14798        defaultInstance.initFields();
14799      }
14800
14801      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeReferenceSection.INodeReference)
14802    }
14803
14804    private void initFields() {
14805    }
14806    private byte memoizedIsInitialized = -1;
14807    public final boolean isInitialized() {
14808      byte isInitialized = memoizedIsInitialized;
14809      if (isInitialized != -1) return isInitialized == 1;
14810
14811      memoizedIsInitialized = 1;
14812      return true;
14813    }
14814
14815    public void writeTo(com.google.protobuf.CodedOutputStream output)
14816                        throws java.io.IOException {
14817      getSerializedSize();
14818      getUnknownFields().writeTo(output);
14819    }
14820
14821    private int memoizedSerializedSize = -1;
14822    public int getSerializedSize() {
14823      int size = memoizedSerializedSize;
14824      if (size != -1) return size;
14825
14826      size = 0;
14827      size += getUnknownFields().getSerializedSize();
14828      memoizedSerializedSize = size;
14829      return size;
14830    }
14831
14832    private static final long serialVersionUID = 0L;
14833    @java.lang.Override
14834    protected java.lang.Object writeReplace()
14835        throws java.io.ObjectStreamException {
14836      return super.writeReplace();
14837    }
14838
14839    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom(
14840        com.google.protobuf.ByteString data)
14841        throws com.google.protobuf.InvalidProtocolBufferException {
14842      return PARSER.parseFrom(data);
14843    }
14844    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom(
14845        com.google.protobuf.ByteString data,
14846        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14847        throws com.google.protobuf.InvalidProtocolBufferException {
14848      return PARSER.parseFrom(data, extensionRegistry);
14849    }
14850    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom(byte[] data)
14851        throws com.google.protobuf.InvalidProtocolBufferException {
14852      return PARSER.parseFrom(data);
14853    }
14854    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom(
14855        byte[] data,
14856        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14857        throws com.google.protobuf.InvalidProtocolBufferException {
14858      return PARSER.parseFrom(data, extensionRegistry);
14859    }
14860    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom(java.io.InputStream input)
14861        throws java.io.IOException {
14862      return PARSER.parseFrom(input);
14863    }
14864    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom(
14865        java.io.InputStream input,
14866        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14867        throws java.io.IOException {
14868      return PARSER.parseFrom(input, extensionRegistry);
14869    }
14870    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseDelimitedFrom(java.io.InputStream input)
14871        throws java.io.IOException {
14872      return PARSER.parseDelimitedFrom(input);
14873    }
14874    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseDelimitedFrom(
14875        java.io.InputStream input,
14876        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14877        throws java.io.IOException {
14878      return PARSER.parseDelimitedFrom(input, extensionRegistry);
14879    }
14880    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom(
14881        com.google.protobuf.CodedInputStream input)
14882        throws java.io.IOException {
14883      return PARSER.parseFrom(input);
14884    }
14885    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom(
14886        com.google.protobuf.CodedInputStream input,
14887        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14888        throws java.io.IOException {
14889      return PARSER.parseFrom(input, extensionRegistry);
14890    }
14891
14892    public static Builder newBuilder() { return Builder.create(); }
14893    public Builder newBuilderForType() { return newBuilder(); }
14894    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection prototype) {
14895      return newBuilder().mergeFrom(prototype);
14896    }
14897    public Builder toBuilder() { return newBuilder(this); }
14898
14899    @java.lang.Override
14900    protected Builder newBuilderForType(
14901        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
14902      Builder builder = new Builder(parent);
14903      return builder;
14904    }
14905    /**
14906     * Protobuf type {@code hadoop.hdfs.fsimage.INodeReferenceSection}
14907     */
14908    public static final class Builder extends
14909        com.google.protobuf.GeneratedMessage.Builder<Builder>
14910       implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSectionOrBuilder {
14911      public static final com.google.protobuf.Descriptors.Descriptor
14912          getDescriptor() {
14913        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_descriptor;
14914      }
14915
14916      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
14917          internalGetFieldAccessorTable() {
14918        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_fieldAccessorTable
14919            .ensureFieldAccessorsInitialized(
14920                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.Builder.class);
14921      }
14922
14923      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.newBuilder()
14924      private Builder() {
14925        maybeForceBuilderInitialization();
14926      }
14927
14928      private Builder(
14929          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
14930        super(parent);
14931        maybeForceBuilderInitialization();
14932      }
14933      private void maybeForceBuilderInitialization() {
14934        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
14935        }
14936      }
14937      private static Builder create() {
14938        return new Builder();
14939      }
14940
14941      public Builder clear() {
14942        super.clear();
14943        return this;
14944      }
14945
14946      public Builder clone() {
14947        return create().mergeFrom(buildPartial());
14948      }
14949
14950      public com.google.protobuf.Descriptors.Descriptor
14951          getDescriptorForType() {
14952        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_descriptor;
14953      }
14954
14955      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection getDefaultInstanceForType() {
14956        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.getDefaultInstance();
14957      }
14958
14959      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection build() {
14960        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection result = buildPartial();
14961        if (!result.isInitialized()) {
14962          throw newUninitializedMessageException(result);
14963        }
14964        return result;
14965      }
14966
14967      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection buildPartial() {
14968        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection(this);
14969        onBuilt();
14970        return result;
14971      }
14972
14973      public Builder mergeFrom(com.google.protobuf.Message other) {
14974        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection) {
14975          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection)other);
14976        } else {
14977          super.mergeFrom(other);
14978          return this;
14979        }
14980      }
14981
14982      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection other) {
14983        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.getDefaultInstance()) return this;
14984        this.mergeUnknownFields(other.getUnknownFields());
14985        return this;
14986      }
14987
14988      public final boolean isInitialized() {
14989        return true;
14990      }
14991
14992      public Builder mergeFrom(
14993          com.google.protobuf.CodedInputStream input,
14994          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14995          throws java.io.IOException {
14996        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parsedMessage = null;
14997        try {
14998          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
14999        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
15000          parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection) e.getUnfinishedMessage();
15001          throw e;
15002        } finally {
15003          if (parsedMessage != null) {
15004            mergeFrom(parsedMessage);
15005          }
15006        }
15007        return this;
15008      }
15009
15010      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeReferenceSection)
15011    }
15012
15013    static {
15014      defaultInstance = new INodeReferenceSection(true);
15015      defaultInstance.initFields();
15016    }
15017
15018    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeReferenceSection)
15019  }
15020
15021  public interface SnapshotSectionOrBuilder
15022      extends com.google.protobuf.MessageOrBuilder {
15023
15024    // optional uint32 snapshotCounter = 1;
15025    /**
15026     * <code>optional uint32 snapshotCounter = 1;</code>
15027     */
15028    boolean hasSnapshotCounter();
15029    /**
15030     * <code>optional uint32 snapshotCounter = 1;</code>
15031     */
15032    int getSnapshotCounter();
15033
15034    // repeated uint64 snapshottableDir = 2 [packed = true];
15035    /**
15036     * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
15037     */
15038    java.util.List<java.lang.Long> getSnapshottableDirList();
15039    /**
15040     * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
15041     */
15042    int getSnapshottableDirCount();
15043    /**
15044     * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
15045     */
15046    long getSnapshottableDir(int index);
15047
15048    // optional uint32 numSnapshots = 3;
15049    /**
15050     * <code>optional uint32 numSnapshots = 3;</code>
15051     *
15052     * <pre>
15053     * total number of snapshots
15054     * </pre>
15055     */
15056    boolean hasNumSnapshots();
15057    /**
15058     * <code>optional uint32 numSnapshots = 3;</code>
15059     *
15060     * <pre>
15061     * total number of snapshots
15062     * </pre>
15063     */
15064    int getNumSnapshots();
15065  }
15066  /**
15067   * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotSection}
15068   *
15069   * <pre>
15070   **
15071   * This section records the information about snapshot
15072   * NAME: SNAPSHOT
15073   * </pre>
15074   */
15075  public static final class SnapshotSection extends
15076      com.google.protobuf.GeneratedMessage
15077      implements SnapshotSectionOrBuilder {
15078    // Use SnapshotSection.newBuilder() to construct.
15079    private SnapshotSection(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
15080      super(builder);
15081      this.unknownFields = builder.getUnknownFields();
15082    }
15083    private SnapshotSection(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
15084
15085    private static final SnapshotSection defaultInstance;
15086    public static SnapshotSection getDefaultInstance() {
15087      return defaultInstance;
15088    }
15089
15090    public SnapshotSection getDefaultInstanceForType() {
15091      return defaultInstance;
15092    }
15093
15094    private final com.google.protobuf.UnknownFieldSet unknownFields;
15095    @java.lang.Override
15096    public final com.google.protobuf.UnknownFieldSet
15097        getUnknownFields() {
15098      return this.unknownFields;
15099    }
15100    private SnapshotSection(
15101        com.google.protobuf.CodedInputStream input,
15102        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15103        throws com.google.protobuf.InvalidProtocolBufferException {
15104      initFields();
15105      int mutable_bitField0_ = 0;
15106      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
15107          com.google.protobuf.UnknownFieldSet.newBuilder();
15108      try {
15109        boolean done = false;
15110        while (!done) {
15111          int tag = input.readTag();
15112          switch (tag) {
15113            case 0:
15114              done = true;
15115              break;
15116            default: {
15117              if (!parseUnknownField(input, unknownFields,
15118                                     extensionRegistry, tag)) {
15119                done = true;
15120              }
15121              break;
15122            }
15123            case 8: {
15124              bitField0_ |= 0x00000001;
15125              snapshotCounter_ = input.readUInt32();
15126              break;
15127            }
15128            case 16: {
15129              if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
15130                snapshottableDir_ = new java.util.ArrayList<java.lang.Long>();
15131                mutable_bitField0_ |= 0x00000002;
15132              }
15133              snapshottableDir_.add(input.readUInt64());
15134              break;
15135            }
15136            case 18: {
15137              int length = input.readRawVarint32();
15138              int limit = input.pushLimit(length);
15139              if (!((mutable_bitField0_ & 0x00000002) == 0x00000002) && input.getBytesUntilLimit() > 0) {
15140                snapshottableDir_ = new java.util.ArrayList<java.lang.Long>();
15141                mutable_bitField0_ |= 0x00000002;
15142              }
15143              while (input.getBytesUntilLimit() > 0) {
15144                snapshottableDir_.add(input.readUInt64());
15145              }
15146              input.popLimit(limit);
15147              break;
15148            }
15149            case 24: {
15150              bitField0_ |= 0x00000002;
15151              numSnapshots_ = input.readUInt32();
15152              break;
15153            }
15154          }
15155        }
15156      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
15157        throw e.setUnfinishedMessage(this);
15158      } catch (java.io.IOException e) {
15159        throw new com.google.protobuf.InvalidProtocolBufferException(
15160            e.getMessage()).setUnfinishedMessage(this);
15161      } finally {
15162        if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
15163          snapshottableDir_ = java.util.Collections.unmodifiableList(snapshottableDir_);
15164        }
15165        this.unknownFields = unknownFields.build();
15166        makeExtensionsImmutable();
15167      }
15168    }
15169    public static final com.google.protobuf.Descriptors.Descriptor
15170        getDescriptor() {
15171      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_descriptor;
15172    }
15173
15174    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
15175        internalGetFieldAccessorTable() {
15176      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_fieldAccessorTable
15177          .ensureFieldAccessorsInitialized(
15178              org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Builder.class);
15179    }
15180
15181    public static com.google.protobuf.Parser<SnapshotSection> PARSER =
15182        new com.google.protobuf.AbstractParser<SnapshotSection>() {
15183      public SnapshotSection parsePartialFrom(
15184          com.google.protobuf.CodedInputStream input,
15185          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15186          throws com.google.protobuf.InvalidProtocolBufferException {
15187        return new SnapshotSection(input, extensionRegistry);
15188      }
15189    };
15190
15191    @java.lang.Override
15192    public com.google.protobuf.Parser<SnapshotSection> getParserForType() {
15193      return PARSER;
15194    }
15195
15196    public interface SnapshotOrBuilder
15197        extends com.google.protobuf.MessageOrBuilder {
15198
15199      // optional uint32 snapshotId = 1;
15200      /**
15201       * <code>optional uint32 snapshotId = 1;</code>
15202       */
15203      boolean hasSnapshotId();
15204      /**
15205       * <code>optional uint32 snapshotId = 1;</code>
15206       */
15207      int getSnapshotId();
15208
15209      // optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;
15210      /**
15211       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
15212       *
15213       * <pre>
15214       * Snapshot root
15215       * </pre>
15216       */
15217      boolean hasRoot();
15218      /**
15219       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
15220       *
15221       * <pre>
15222       * Snapshot root
15223       * </pre>
15224       */
15225      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode getRoot();
15226      /**
15227       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
15228       *
15229       * <pre>
15230       * Snapshot root
15231       * </pre>
15232       */
15233      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeOrBuilder getRootOrBuilder();
15234    }
15235    /**
15236     * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotSection.Snapshot}
15237     */
15238    public static final class Snapshot extends
15239        com.google.protobuf.GeneratedMessage
15240        implements SnapshotOrBuilder {
15241      // Use Snapshot.newBuilder() to construct.
15242      private Snapshot(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
15243        super(builder);
15244        this.unknownFields = builder.getUnknownFields();
15245      }
15246      private Snapshot(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
15247
15248      private static final Snapshot defaultInstance;
15249      public static Snapshot getDefaultInstance() {
15250        return defaultInstance;
15251      }
15252
15253      public Snapshot getDefaultInstanceForType() {
15254        return defaultInstance;
15255      }
15256
15257      private final com.google.protobuf.UnknownFieldSet unknownFields;
15258      @java.lang.Override
15259      public final com.google.protobuf.UnknownFieldSet
15260          getUnknownFields() {
15261        return this.unknownFields;
15262      }
15263      private Snapshot(
15264          com.google.protobuf.CodedInputStream input,
15265          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15266          throws com.google.protobuf.InvalidProtocolBufferException {
15267        initFields();
15268        int mutable_bitField0_ = 0;
15269        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
15270            com.google.protobuf.UnknownFieldSet.newBuilder();
15271        try {
15272          boolean done = false;
15273          while (!done) {
15274            int tag = input.readTag();
15275            switch (tag) {
15276              case 0:
15277                done = true;
15278                break;
15279              default: {
15280                if (!parseUnknownField(input, unknownFields,
15281                                       extensionRegistry, tag)) {
15282                  done = true;
15283                }
15284                break;
15285              }
15286              case 8: {
15287                bitField0_ |= 0x00000001;
15288                snapshotId_ = input.readUInt32();
15289                break;
15290              }
15291              case 18: {
15292                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder subBuilder = null;
15293                if (((bitField0_ & 0x00000002) == 0x00000002)) {
15294                  subBuilder = root_.toBuilder();
15295                }
15296                root_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.PARSER, extensionRegistry);
15297                if (subBuilder != null) {
15298                  subBuilder.mergeFrom(root_);
15299                  root_ = subBuilder.buildPartial();
15300                }
15301                bitField0_ |= 0x00000002;
15302                break;
15303              }
15304            }
15305          }
15306        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
15307          throw e.setUnfinishedMessage(this);
15308        } catch (java.io.IOException e) {
15309          throw new com.google.protobuf.InvalidProtocolBufferException(
15310              e.getMessage()).setUnfinishedMessage(this);
15311        } finally {
15312          this.unknownFields = unknownFields.build();
15313          makeExtensionsImmutable();
15314        }
15315      }
15316      public static final com.google.protobuf.Descriptors.Descriptor
15317          getDescriptor() {
15318        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_descriptor;
15319      }
15320
15321      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
15322          internalGetFieldAccessorTable() {
15323        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_fieldAccessorTable
15324            .ensureFieldAccessorsInitialized(
15325                org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot.Builder.class);
15326      }
15327
15328      public static com.google.protobuf.Parser<Snapshot> PARSER =
15329          new com.google.protobuf.AbstractParser<Snapshot>() {
15330        public Snapshot parsePartialFrom(
15331            com.google.protobuf.CodedInputStream input,
15332            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15333            throws com.google.protobuf.InvalidProtocolBufferException {
15334          return new Snapshot(input, extensionRegistry);
15335        }
15336      };
15337
15338      @java.lang.Override
15339      public com.google.protobuf.Parser<Snapshot> getParserForType() {
15340        return PARSER;
15341      }
15342
15343      private int bitField0_;
15344      // optional uint32 snapshotId = 1;
15345      public static final int SNAPSHOTID_FIELD_NUMBER = 1;
15346      private int snapshotId_;
15347      /**
15348       * <code>optional uint32 snapshotId = 1;</code>
15349       */
15350      public boolean hasSnapshotId() {
15351        return ((bitField0_ & 0x00000001) == 0x00000001);
15352      }
15353      /**
15354       * <code>optional uint32 snapshotId = 1;</code>
15355       */
15356      public int getSnapshotId() {
15357        return snapshotId_;
15358      }
15359
15360      // optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;
15361      public static final int ROOT_FIELD_NUMBER = 2;
15362      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode root_;
15363      /**
15364       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
15365       *
15366       * <pre>
15367       * Snapshot root
15368       * </pre>
15369       */
15370      public boolean hasRoot() {
15371        return ((bitField0_ & 0x00000002) == 0x00000002);
15372      }
15373      /**
15374       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
15375       *
15376       * <pre>
15377       * Snapshot root
15378       * </pre>
15379       */
15380      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode getRoot() {
15381        return root_;
15382      }
15383      /**
15384       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
15385       *
15386       * <pre>
15387       * Snapshot root
15388       * </pre>
15389       */
15390      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeOrBuilder getRootOrBuilder() {
15391        return root_;
15392      }
15393
15394      private void initFields() {
15395        snapshotId_ = 0;
15396        root_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance();
15397      }
15398      private byte memoizedIsInitialized = -1;
15399      public final boolean isInitialized() {
15400        byte isInitialized = memoizedIsInitialized;
15401        if (isInitialized != -1) return isInitialized == 1;
15402
15403        if (hasRoot()) {
15404          if (!getRoot().isInitialized()) {
15405            memoizedIsInitialized = 0;
15406            return false;
15407          }
15408        }
15409        memoizedIsInitialized = 1;
15410        return true;
15411      }
15412
15413      public void writeTo(com.google.protobuf.CodedOutputStream output)
15414                          throws java.io.IOException {
15415        getSerializedSize();
15416        if (((bitField0_ & 0x00000001) == 0x00000001)) {
15417          output.writeUInt32(1, snapshotId_);
15418        }
15419        if (((bitField0_ & 0x00000002) == 0x00000002)) {
15420          output.writeMessage(2, root_);
15421        }
15422        getUnknownFields().writeTo(output);
15423      }
15424
15425      private int memoizedSerializedSize = -1;
15426      public int getSerializedSize() {
15427        int size = memoizedSerializedSize;
15428        if (size != -1) return size;
15429
15430        size = 0;
15431        if (((bitField0_ & 0x00000001) == 0x00000001)) {
15432          size += com.google.protobuf.CodedOutputStream
15433            .computeUInt32Size(1, snapshotId_);
15434        }
15435        if (((bitField0_ & 0x00000002) == 0x00000002)) {
15436          size += com.google.protobuf.CodedOutputStream
15437            .computeMessageSize(2, root_);
15438        }
15439        size += getUnknownFields().getSerializedSize();
15440        memoizedSerializedSize = size;
15441        return size;
15442      }
15443
15444      private static final long serialVersionUID = 0L;
15445      @java.lang.Override
15446      protected java.lang.Object writeReplace()
15447          throws java.io.ObjectStreamException {
15448        return super.writeReplace();
15449      }
15450
15451      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom(
15452          com.google.protobuf.ByteString data)
15453          throws com.google.protobuf.InvalidProtocolBufferException {
15454        return PARSER.parseFrom(data);
15455      }
15456      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom(
15457          com.google.protobuf.ByteString data,
15458          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15459          throws com.google.protobuf.InvalidProtocolBufferException {
15460        return PARSER.parseFrom(data, extensionRegistry);
15461      }
15462      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom(byte[] data)
15463          throws com.google.protobuf.InvalidProtocolBufferException {
15464        return PARSER.parseFrom(data);
15465      }
15466      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom(
15467          byte[] data,
15468          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15469          throws com.google.protobuf.InvalidProtocolBufferException {
15470        return PARSER.parseFrom(data, extensionRegistry);
15471      }
15472      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom(java.io.InputStream input)
15473          throws java.io.IOException {
15474        return PARSER.parseFrom(input);
15475      }
15476      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom(
15477          java.io.InputStream input,
15478          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15479          throws java.io.IOException {
15480        return PARSER.parseFrom(input, extensionRegistry);
15481      }
15482      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseDelimitedFrom(java.io.InputStream input)
15483          throws java.io.IOException {
15484        return PARSER.parseDelimitedFrom(input);
15485      }
15486      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseDelimitedFrom(
15487          java.io.InputStream input,
15488          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15489          throws java.io.IOException {
15490        return PARSER.parseDelimitedFrom(input, extensionRegistry);
15491      }
15492      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom(
15493          com.google.protobuf.CodedInputStream input)
15494          throws java.io.IOException {
15495        return PARSER.parseFrom(input);
15496      }
15497      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom(
15498          com.google.protobuf.CodedInputStream input,
15499          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15500          throws java.io.IOException {
15501        return PARSER.parseFrom(input, extensionRegistry);
15502      }
15503
15504      public static Builder newBuilder() { return Builder.create(); }
15505      public Builder newBuilderForType() { return newBuilder(); }
15506      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot prototype) {
15507        return newBuilder().mergeFrom(prototype);
15508      }
15509      public Builder toBuilder() { return newBuilder(this); }
15510
15511      @java.lang.Override
15512      protected Builder newBuilderForType(
15513          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
15514        Builder builder = new Builder(parent);
15515        return builder;
15516      }
15517      /**
15518       * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotSection.Snapshot}
15519       */
15520      public static final class Builder extends
15521          com.google.protobuf.GeneratedMessage.Builder<Builder>
15522         implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.SnapshotOrBuilder {
15523        public static final com.google.protobuf.Descriptors.Descriptor
15524            getDescriptor() {
15525          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_descriptor;
15526        }
15527
15528        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
15529            internalGetFieldAccessorTable() {
15530          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_fieldAccessorTable
15531              .ensureFieldAccessorsInitialized(
15532                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot.Builder.class);
15533        }
15534
15535        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot.newBuilder()
15536        private Builder() {
15537          maybeForceBuilderInitialization();
15538        }
15539
15540        private Builder(
15541            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
15542          super(parent);
15543          maybeForceBuilderInitialization();
15544        }
15545        private void maybeForceBuilderInitialization() {
15546          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
15547            getRootFieldBuilder();
15548          }
15549        }
15550        private static Builder create() {
15551          return new Builder();
15552        }
15553
15554        public Builder clear() {
15555          super.clear();
15556          snapshotId_ = 0;
15557          bitField0_ = (bitField0_ & ~0x00000001);
15558          if (rootBuilder_ == null) {
15559            root_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance();
15560          } else {
15561            rootBuilder_.clear();
15562          }
15563          bitField0_ = (bitField0_ & ~0x00000002);
15564          return this;
15565        }
15566
15567        public Builder clone() {
15568          return create().mergeFrom(buildPartial());
15569        }
15570
15571        public com.google.protobuf.Descriptors.Descriptor
15572            getDescriptorForType() {
15573          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_descriptor;
15574        }
15575
15576        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot getDefaultInstanceForType() {
15577          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot.getDefaultInstance();
15578        }
15579
15580        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot build() {
15581          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot result = buildPartial();
15582          if (!result.isInitialized()) {
15583            throw newUninitializedMessageException(result);
15584          }
15585          return result;
15586        }
15587
15588        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot buildPartial() {
15589          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot(this);
15590          int from_bitField0_ = bitField0_;
15591          int to_bitField0_ = 0;
15592          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
15593            to_bitField0_ |= 0x00000001;
15594          }
15595          result.snapshotId_ = snapshotId_;
15596          if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
15597            to_bitField0_ |= 0x00000002;
15598          }
15599          if (rootBuilder_ == null) {
15600            result.root_ = root_;
15601          } else {
15602            result.root_ = rootBuilder_.build();
15603          }
15604          result.bitField0_ = to_bitField0_;
15605          onBuilt();
15606          return result;
15607        }
15608
15609        public Builder mergeFrom(com.google.protobuf.Message other) {
15610          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot) {
15611            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot)other);
15612          } else {
15613            super.mergeFrom(other);
15614            return this;
15615          }
15616        }
15617
15618        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot other) {
15619          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot.getDefaultInstance()) return this;
15620          if (other.hasSnapshotId()) {
15621            setSnapshotId(other.getSnapshotId());
15622          }
15623          if (other.hasRoot()) {
15624            mergeRoot(other.getRoot());
15625          }
15626          this.mergeUnknownFields(other.getUnknownFields());
15627          return this;
15628        }
15629
15630        public final boolean isInitialized() {
15631          if (hasRoot()) {
            if (!getRoot().isInitialized()) {
              return false;
            }
15636          }
15637          return true;
15638        }
15639
15640        public Builder mergeFrom(
15641            com.google.protobuf.CodedInputStream input,
15642            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15643            throws java.io.IOException {
15644          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parsedMessage = null;
15645          try {
15646            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
15647          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
15648            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot) e.getUnfinishedMessage();
15649            throw e;
15650          } finally {
15651            if (parsedMessage != null) {
15652              mergeFrom(parsedMessage);
15653            }
15654          }
15655          return this;
15656        }
15657        private int bitField0_;
15658
15659        // optional uint32 snapshotId = 1;
15660        private int snapshotId_ ;
15661        /**
15662         * <code>optional uint32 snapshotId = 1;</code>
15663         */
15664        public boolean hasSnapshotId() {
15665          return ((bitField0_ & 0x00000001) == 0x00000001);
15666        }
15667        /**
15668         * <code>optional uint32 snapshotId = 1;</code>
15669         */
15670        public int getSnapshotId() {
15671          return snapshotId_;
15672        }
15673        /**
15674         * <code>optional uint32 snapshotId = 1;</code>
15675         */
15676        public Builder setSnapshotId(int value) {
15677          bitField0_ |= 0x00000001;
15678          snapshotId_ = value;
15679          onChanged();
15680          return this;
15681        }
15682        /**
15683         * <code>optional uint32 snapshotId = 1;</code>
15684         */
15685        public Builder clearSnapshotId() {
15686          bitField0_ = (bitField0_ & ~0x00000001);
15687          snapshotId_ = 0;
15688          onChanged();
15689          return this;
15690        }
15691
15692        // optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;
15693        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode root_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance();
15694        private com.google.protobuf.SingleFieldBuilder<
15695            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeOrBuilder> rootBuilder_;
15696        /**
15697         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
15698         *
15699         * <pre>
15700         * Snapshot root
15701         * </pre>
15702         */
15703        public boolean hasRoot() {
15704          return ((bitField0_ & 0x00000002) == 0x00000002);
15705        }
15706        /**
15707         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
15708         *
15709         * <pre>
15710         * Snapshot root
15711         * </pre>
15712         */
15713        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode getRoot() {
15714          if (rootBuilder_ == null) {
15715            return root_;
15716          } else {
15717            return rootBuilder_.getMessage();
15718          }
15719        }
15720        /**
15721         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
15722         *
15723         * <pre>
15724         * Snapshot root
15725         * </pre>
15726         */
15727        public Builder setRoot(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode value) {
15728          if (rootBuilder_ == null) {
15729            if (value == null) {
15730              throw new NullPointerException();
15731            }
15732            root_ = value;
15733            onChanged();
15734          } else {
15735            rootBuilder_.setMessage(value);
15736          }
15737          bitField0_ |= 0x00000002;
15738          return this;
15739        }
15740        /**
15741         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
15742         *
15743         * <pre>
15744         * Snapshot root
15745         * </pre>
15746         */
15747        public Builder setRoot(
15748            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder builderForValue) {
15749          if (rootBuilder_ == null) {
15750            root_ = builderForValue.build();
15751            onChanged();
15752          } else {
15753            rootBuilder_.setMessage(builderForValue.build());
15754          }
15755          bitField0_ |= 0x00000002;
15756          return this;
15757        }
15758        /**
15759         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
15760         *
15761         * <pre>
15762         * Snapshot root
15763         * </pre>
15764         */
15765        public Builder mergeRoot(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode value) {
15766          if (rootBuilder_ == null) {
15767            if (((bitField0_ & 0x00000002) == 0x00000002) &&
15768                root_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance()) {
15769              root_ =
15770                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.newBuilder(root_).mergeFrom(value).buildPartial();
15771            } else {
15772              root_ = value;
15773            }
15774            onChanged();
15775          } else {
15776            rootBuilder_.mergeFrom(value);
15777          }
15778          bitField0_ |= 0x00000002;
15779          return this;
15780        }
15781        /**
15782         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
15783         *
15784         * <pre>
15785         * Snapshot root
15786         * </pre>
15787         */
15788        public Builder clearRoot() {
15789          if (rootBuilder_ == null) {
15790            root_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance();
15791            onChanged();
15792          } else {
15793            rootBuilder_.clear();
15794          }
15795          bitField0_ = (bitField0_ & ~0x00000002);
15796          return this;
15797        }
15798        /**
15799         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
15800         *
15801         * <pre>
15802         * Snapshot root
15803         * </pre>
15804         */
15805        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder getRootBuilder() {
15806          bitField0_ |= 0x00000002;
15807          onChanged();
15808          return getRootFieldBuilder().getBuilder();
15809        }
15810        /**
15811         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
15812         *
15813         * <pre>
15814         * Snapshot root
15815         * </pre>
15816         */
15817        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeOrBuilder getRootOrBuilder() {
15818          if (rootBuilder_ != null) {
15819            return rootBuilder_.getMessageOrBuilder();
15820          } else {
15821            return root_;
15822          }
15823        }
15824        /**
15825         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
15826         *
15827         * <pre>
15828         * Snapshot root
15829         * </pre>
15830         */
15831        private com.google.protobuf.SingleFieldBuilder<
15832            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeOrBuilder> 
15833            getRootFieldBuilder() {
15834          if (rootBuilder_ == null) {
15835            rootBuilder_ = new com.google.protobuf.SingleFieldBuilder<
15836                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeOrBuilder>(
15837                    root_,
15838                    getParentForChildren(),
15839                    isClean());
15840            root_ = null;
15841          }
15842          return rootBuilder_;
15843        }
15844
15845        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SnapshotSection.Snapshot)
15846      }
15847
15848      static {
15849        defaultInstance = new Snapshot(true);
15850        defaultInstance.initFields();
15851      }
15852
15853      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SnapshotSection.Snapshot)
15854    }
15855
15856    private int bitField0_;
15857    // optional uint32 snapshotCounter = 1;
15858    public static final int SNAPSHOTCOUNTER_FIELD_NUMBER = 1;
15859    private int snapshotCounter_;
15860    /**
15861     * <code>optional uint32 snapshotCounter = 1;</code>
15862     */
15863    public boolean hasSnapshotCounter() {
15864      return ((bitField0_ & 0x00000001) == 0x00000001);
15865    }
15866    /**
15867     * <code>optional uint32 snapshotCounter = 1;</code>
15868     */
15869    public int getSnapshotCounter() {
15870      return snapshotCounter_;
15871    }
15872
15873    // repeated uint64 snapshottableDir = 2 [packed = true];
15874    public static final int SNAPSHOTTABLEDIR_FIELD_NUMBER = 2;
15875    private java.util.List<java.lang.Long> snapshottableDir_;
15876    /**
15877     * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
15878     */
15879    public java.util.List<java.lang.Long>
15880        getSnapshottableDirList() {
15881      return snapshottableDir_;
15882    }
15883    /**
15884     * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
15885     */
15886    public int getSnapshottableDirCount() {
15887      return snapshottableDir_.size();
15888    }
15889    /**
15890     * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
15891     */
15892    public long getSnapshottableDir(int index) {
15893      return snapshottableDir_.get(index);
15894    }
15895    private int snapshottableDirMemoizedSerializedSize = -1;
15896
15897    // optional uint32 numSnapshots = 3;
15898    public static final int NUMSNAPSHOTS_FIELD_NUMBER = 3;
15899    private int numSnapshots_;
15900    /**
15901     * <code>optional uint32 numSnapshots = 3;</code>
15902     *
15903     * <pre>
15904     * total number of snapshots
15905     * </pre>
15906     */
15907    public boolean hasNumSnapshots() {
15908      return ((bitField0_ & 0x00000002) == 0x00000002);
15909    }
15910    /**
15911     * <code>optional uint32 numSnapshots = 3;</code>
15912     *
15913     * <pre>
15914     * total number of snapshots
15915     * </pre>
15916     */
15917    public int getNumSnapshots() {
15918      return numSnapshots_;
15919    }
15920
15921    private void initFields() {
15922      snapshotCounter_ = 0;
15923      snapshottableDir_ = java.util.Collections.emptyList();
15924      numSnapshots_ = 0;
15925    }
15926    private byte memoizedIsInitialized = -1;
15927    public final boolean isInitialized() {
15928      byte isInitialized = memoizedIsInitialized;
15929      if (isInitialized != -1) return isInitialized == 1;
15930
15931      memoizedIsInitialized = 1;
15932      return true;
15933    }
15934
15935    public void writeTo(com.google.protobuf.CodedOutputStream output)
15936                        throws java.io.IOException {
15937      getSerializedSize();
15938      if (((bitField0_ & 0x00000001) == 0x00000001)) {
15939        output.writeUInt32(1, snapshotCounter_);
15940      }
15941      if (getSnapshottableDirList().size() > 0) {
15942        output.writeRawVarint32(18);
15943        output.writeRawVarint32(snapshottableDirMemoizedSerializedSize);
15944      }
15945      for (int i = 0; i < snapshottableDir_.size(); i++) {
15946        output.writeUInt64NoTag(snapshottableDir_.get(i));
15947      }
15948      if (((bitField0_ & 0x00000002) == 0x00000002)) {
15949        output.writeUInt32(3, numSnapshots_);
15950      }
15951      getUnknownFields().writeTo(output);
15952    }
15953
15954    private int memoizedSerializedSize = -1;
15955    public int getSerializedSize() {
15956      int size = memoizedSerializedSize;
15957      if (size != -1) return size;
15958
15959      size = 0;
15960      if (((bitField0_ & 0x00000001) == 0x00000001)) {
15961        size += com.google.protobuf.CodedOutputStream
15962          .computeUInt32Size(1, snapshotCounter_);
15963      }
15964      {
15965        int dataSize = 0;
15966        for (int i = 0; i < snapshottableDir_.size(); i++) {
15967          dataSize += com.google.protobuf.CodedOutputStream
15968            .computeUInt64SizeNoTag(snapshottableDir_.get(i));
15969        }
15970        size += dataSize;
15971        if (!getSnapshottableDirList().isEmpty()) {
15972          size += 1;
15973          size += com.google.protobuf.CodedOutputStream
15974              .computeInt32SizeNoTag(dataSize);
15975        }
15976        snapshottableDirMemoizedSerializedSize = dataSize;
15977      }
15978      if (((bitField0_ & 0x00000002) == 0x00000002)) {
15979        size += com.google.protobuf.CodedOutputStream
15980          .computeUInt32Size(3, numSnapshots_);
15981      }
15982      size += getUnknownFields().getSerializedSize();
15983      memoizedSerializedSize = size;
15984      return size;
15985    }
15986
15987    private static final long serialVersionUID = 0L;
15988    @java.lang.Override
15989    protected java.lang.Object writeReplace()
15990        throws java.io.ObjectStreamException {
15991      return super.writeReplace();
15992    }
15993
15994    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom(
15995        com.google.protobuf.ByteString data)
15996        throws com.google.protobuf.InvalidProtocolBufferException {
15997      return PARSER.parseFrom(data);
15998    }
15999    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom(
16000        com.google.protobuf.ByteString data,
16001        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16002        throws com.google.protobuf.InvalidProtocolBufferException {
16003      return PARSER.parseFrom(data, extensionRegistry);
16004    }
16005    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom(byte[] data)
16006        throws com.google.protobuf.InvalidProtocolBufferException {
16007      return PARSER.parseFrom(data);
16008    }
16009    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom(
16010        byte[] data,
16011        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16012        throws com.google.protobuf.InvalidProtocolBufferException {
16013      return PARSER.parseFrom(data, extensionRegistry);
16014    }
16015    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom(java.io.InputStream input)
16016        throws java.io.IOException {
16017      return PARSER.parseFrom(input);
16018    }
16019    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom(
16020        java.io.InputStream input,
16021        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16022        throws java.io.IOException {
16023      return PARSER.parseFrom(input, extensionRegistry);
16024    }
16025    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseDelimitedFrom(java.io.InputStream input)
16026        throws java.io.IOException {
16027      return PARSER.parseDelimitedFrom(input);
16028    }
16029    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseDelimitedFrom(
16030        java.io.InputStream input,
16031        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16032        throws java.io.IOException {
16033      return PARSER.parseDelimitedFrom(input, extensionRegistry);
16034    }
16035    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom(
16036        com.google.protobuf.CodedInputStream input)
16037        throws java.io.IOException {
16038      return PARSER.parseFrom(input);
16039    }
16040    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom(
16041        com.google.protobuf.CodedInputStream input,
16042        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16043        throws java.io.IOException {
16044      return PARSER.parseFrom(input, extensionRegistry);
16045    }
16046
16047    public static Builder newBuilder() { return Builder.create(); }
16048    public Builder newBuilderForType() { return newBuilder(); }
16049    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection prototype) {
16050      return newBuilder().mergeFrom(prototype);
16051    }
16052    public Builder toBuilder() { return newBuilder(this); }
16053
16054    @java.lang.Override
16055    protected Builder newBuilderForType(
16056        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
16057      Builder builder = new Builder(parent);
16058      return builder;
16059    }
16060    /**
16061     * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotSection}
16062     *
16063     * <pre>
16064     **
     * This section records information about snapshots
16066     * NAME: SNAPSHOT
16067     * </pre>
16068     */
16069    public static final class Builder extends
16070        com.google.protobuf.GeneratedMessage.Builder<Builder>
16071       implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSectionOrBuilder {
16072      public static final com.google.protobuf.Descriptors.Descriptor
16073          getDescriptor() {
16074        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_descriptor;
16075      }
16076
16077      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
16078          internalGetFieldAccessorTable() {
16079        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_fieldAccessorTable
16080            .ensureFieldAccessorsInitialized(
16081                org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Builder.class);
16082      }
16083
16084      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.newBuilder()
16085      private Builder() {
16086        maybeForceBuilderInitialization();
16087      }
16088
16089      private Builder(
16090          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
16091        super(parent);
16092        maybeForceBuilderInitialization();
16093      }
16094      private void maybeForceBuilderInitialization() {
16095        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
16096        }
16097      }
16098      private static Builder create() {
16099        return new Builder();
16100      }
16101
16102      public Builder clear() {
16103        super.clear();
16104        snapshotCounter_ = 0;
16105        bitField0_ = (bitField0_ & ~0x00000001);
16106        snapshottableDir_ = java.util.Collections.emptyList();
16107        bitField0_ = (bitField0_ & ~0x00000002);
16108        numSnapshots_ = 0;
16109        bitField0_ = (bitField0_ & ~0x00000004);
16110        return this;
16111      }
16112
16113      public Builder clone() {
16114        return create().mergeFrom(buildPartial());
16115      }
16116
16117      public com.google.protobuf.Descriptors.Descriptor
16118          getDescriptorForType() {
16119        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_descriptor;
16120      }
16121
16122      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection getDefaultInstanceForType() {
16123        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.getDefaultInstance();
16124      }
16125
16126      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection build() {
16127        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection result = buildPartial();
16128        if (!result.isInitialized()) {
16129          throw newUninitializedMessageException(result);
16130        }
16131        return result;
16132      }
16133
16134      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection buildPartial() {
16135        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection(this);
16136        int from_bitField0_ = bitField0_;
16137        int to_bitField0_ = 0;
16138        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
16139          to_bitField0_ |= 0x00000001;
16140        }
16141        result.snapshotCounter_ = snapshotCounter_;
16142        if (((bitField0_ & 0x00000002) == 0x00000002)) {
16143          snapshottableDir_ = java.util.Collections.unmodifiableList(snapshottableDir_);
16144          bitField0_ = (bitField0_ & ~0x00000002);
16145        }
16146        result.snapshottableDir_ = snapshottableDir_;
16147        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
16148          to_bitField0_ |= 0x00000002;
16149        }
16150        result.numSnapshots_ = numSnapshots_;
16151        result.bitField0_ = to_bitField0_;
16152        onBuilt();
16153        return result;
16154      }
16155
16156      public Builder mergeFrom(com.google.protobuf.Message other) {
16157        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection) {
16158          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection)other);
16159        } else {
16160          super.mergeFrom(other);
16161          return this;
16162        }
16163      }
16164
16165      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection other) {
16166        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.getDefaultInstance()) return this;
16167        if (other.hasSnapshotCounter()) {
16168          setSnapshotCounter(other.getSnapshotCounter());
16169        }
16170        if (!other.snapshottableDir_.isEmpty()) {
16171          if (snapshottableDir_.isEmpty()) {
16172            snapshottableDir_ = other.snapshottableDir_;
16173            bitField0_ = (bitField0_ & ~0x00000002);
16174          } else {
16175            ensureSnapshottableDirIsMutable();
16176            snapshottableDir_.addAll(other.snapshottableDir_);
16177          }
16178          onChanged();
16179        }
16180        if (other.hasNumSnapshots()) {
16181          setNumSnapshots(other.getNumSnapshots());
16182        }
16183        this.mergeUnknownFields(other.getUnknownFields());
16184        return this;
16185      }
16186
16187      public final boolean isInitialized() {
16188        return true;
16189      }
16190
16191      public Builder mergeFrom(
16192          com.google.protobuf.CodedInputStream input,
16193          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16194          throws java.io.IOException {
16195        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parsedMessage = null;
16196        try {
16197          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
16198        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
16199          parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection) e.getUnfinishedMessage();
16200          throw e;
16201        } finally {
16202          if (parsedMessage != null) {
16203            mergeFrom(parsedMessage);
16204          }
16205        }
16206        return this;
16207      }
16208      private int bitField0_;
16209
16210      // optional uint32 snapshotCounter = 1;
16211      private int snapshotCounter_ ;
16212      /**
16213       * <code>optional uint32 snapshotCounter = 1;</code>
16214       */
16215      public boolean hasSnapshotCounter() {
16216        return ((bitField0_ & 0x00000001) == 0x00000001);
16217      }
16218      /**
16219       * <code>optional uint32 snapshotCounter = 1;</code>
16220       */
16221      public int getSnapshotCounter() {
16222        return snapshotCounter_;
16223      }
16224      /**
16225       * <code>optional uint32 snapshotCounter = 1;</code>
16226       */
16227      public Builder setSnapshotCounter(int value) {
16228        bitField0_ |= 0x00000001;
16229        snapshotCounter_ = value;
16230        onChanged();
16231        return this;
16232      }
16233      /**
16234       * <code>optional uint32 snapshotCounter = 1;</code>
16235       */
16236      public Builder clearSnapshotCounter() {
16237        bitField0_ = (bitField0_ & ~0x00000001);
16238        snapshotCounter_ = 0;
16239        onChanged();
16240        return this;
16241      }
16242
16243      // repeated uint64 snapshottableDir = 2 [packed = true];
16244      private java.util.List<java.lang.Long> snapshottableDir_ = java.util.Collections.emptyList();
16245      private void ensureSnapshottableDirIsMutable() {
16246        if (!((bitField0_ & 0x00000002) == 0x00000002)) {
16247          snapshottableDir_ = new java.util.ArrayList<java.lang.Long>(snapshottableDir_);
16248          bitField0_ |= 0x00000002;
16249         }
16250      }
16251      /**
16252       * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
16253       */
16254      public java.util.List<java.lang.Long>
16255          getSnapshottableDirList() {
16256        return java.util.Collections.unmodifiableList(snapshottableDir_);
16257      }
16258      /**
16259       * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
16260       */
16261      public int getSnapshottableDirCount() {
16262        return snapshottableDir_.size();
16263      }
16264      /**
16265       * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
16266       */
16267      public long getSnapshottableDir(int index) {
16268        return snapshottableDir_.get(index);
16269      }
16270      /**
16271       * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
16272       */
16273      public Builder setSnapshottableDir(
16274          int index, long value) {
16275        ensureSnapshottableDirIsMutable();
16276        snapshottableDir_.set(index, value);
16277        onChanged();
16278        return this;
16279      }
16280      /**
16281       * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
16282       */
16283      public Builder addSnapshottableDir(long value) {
16284        ensureSnapshottableDirIsMutable();
16285        snapshottableDir_.add(value);
16286        onChanged();
16287        return this;
16288      }
16289      /**
16290       * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
16291       */
16292      public Builder addAllSnapshottableDir(
16293          java.lang.Iterable<? extends java.lang.Long> values) {
16294        ensureSnapshottableDirIsMutable();
16295        super.addAll(values, snapshottableDir_);
16296        onChanged();
16297        return this;
16298      }
16299      /**
16300       * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
16301       */
16302      public Builder clearSnapshottableDir() {
16303        snapshottableDir_ = java.util.Collections.emptyList();
16304        bitField0_ = (bitField0_ & ~0x00000002);
16305        onChanged();
16306        return this;
16307      }
16308
16309      // optional uint32 numSnapshots = 3;
16310      private int numSnapshots_ ;
16311      /**
16312       * <code>optional uint32 numSnapshots = 3;</code>
16313       *
16314       * <pre>
16315       * total number of snapshots
16316       * </pre>
16317       */
16318      public boolean hasNumSnapshots() {
16319        return ((bitField0_ & 0x00000004) == 0x00000004);
16320      }
16321      /**
16322       * <code>optional uint32 numSnapshots = 3;</code>
16323       *
16324       * <pre>
16325       * total number of snapshots
16326       * </pre>
16327       */
16328      public int getNumSnapshots() {
16329        return numSnapshots_;
16330      }
16331      /**
16332       * <code>optional uint32 numSnapshots = 3;</code>
16333       *
16334       * <pre>
16335       * total number of snapshots
16336       * </pre>
16337       */
16338      public Builder setNumSnapshots(int value) {
16339        bitField0_ |= 0x00000004;
16340        numSnapshots_ = value;
16341        onChanged();
16342        return this;
16343      }
16344      /**
16345       * <code>optional uint32 numSnapshots = 3;</code>
16346       *
16347       * <pre>
16348       * total number of snapshots
16349       * </pre>
16350       */
16351      public Builder clearNumSnapshots() {
16352        bitField0_ = (bitField0_ & ~0x00000004);
16353        numSnapshots_ = 0;
16354        onChanged();
16355        return this;
16356      }
16357
16358      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SnapshotSection)
16359    }
16360
16361    static {
16362      defaultInstance = new SnapshotSection(true);
16363      defaultInstance.initFields();
16364    }
16365
16366    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SnapshotSection)
16367  }
16368
16369  public interface SnapshotDiffSectionOrBuilder
16370      extends com.google.protobuf.MessageOrBuilder {
16371  }
16372  /**
16373   * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection}
16374   *
16375   * <pre>
16376   **
16377   * This section records information about snapshot diffs
16378   * NAME: SNAPSHOT_DIFF
16379   * </pre>
16380   */
16381  public static final class SnapshotDiffSection extends
16382      com.google.protobuf.GeneratedMessage
16383      implements SnapshotDiffSectionOrBuilder {
16384    // Use SnapshotDiffSection.newBuilder() to construct.
16385    private SnapshotDiffSection(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
16386      super(builder);
16387      this.unknownFields = builder.getUnknownFields();
16388    }
16389    private SnapshotDiffSection(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
16390
16391    private static final SnapshotDiffSection defaultInstance;
16392    public static SnapshotDiffSection getDefaultInstance() {
16393      return defaultInstance;
16394    }
16395
16396    public SnapshotDiffSection getDefaultInstanceForType() {
16397      return defaultInstance;
16398    }
16399
16400    private final com.google.protobuf.UnknownFieldSet unknownFields;
16401    @java.lang.Override
16402    public final com.google.protobuf.UnknownFieldSet
16403        getUnknownFields() {
16404      return this.unknownFields;
16405    }
16406    private SnapshotDiffSection(
16407        com.google.protobuf.CodedInputStream input,
16408        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16409        throws com.google.protobuf.InvalidProtocolBufferException {
16410      initFields();
16411      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
16412          com.google.protobuf.UnknownFieldSet.newBuilder();
16413      try {
16414        boolean done = false;
16415        while (!done) {
16416          int tag = input.readTag();
16417          switch (tag) {
16418            case 0:
16419              done = true;
16420              break;
16421            default: {
16422              if (!parseUnknownField(input, unknownFields,
16423                                     extensionRegistry, tag)) {
16424                done = true;
16425              }
16426              break;
16427            }
16428          }
16429        }
16430      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
16431        throw e.setUnfinishedMessage(this);
16432      } catch (java.io.IOException e) {
16433        throw new com.google.protobuf.InvalidProtocolBufferException(
16434            e.getMessage()).setUnfinishedMessage(this);
16435      } finally {
16436        this.unknownFields = unknownFields.build();
16437        makeExtensionsImmutable();
16438      }
16439    }
16440    public static final com.google.protobuf.Descriptors.Descriptor
16441        getDescriptor() {
16442      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor;
16443    }
16444
16445    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
16446        internalGetFieldAccessorTable() {
16447      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_fieldAccessorTable
16448          .ensureFieldAccessorsInitialized(
16449              org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.Builder.class);
16450    }
16451
16452    public static com.google.protobuf.Parser<SnapshotDiffSection> PARSER =
16453        new com.google.protobuf.AbstractParser<SnapshotDiffSection>() {
16454      public SnapshotDiffSection parsePartialFrom(
16455          com.google.protobuf.CodedInputStream input,
16456          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16457          throws com.google.protobuf.InvalidProtocolBufferException {
16458        return new SnapshotDiffSection(input, extensionRegistry);
16459      }
16460    };
16461
16462    @java.lang.Override
16463    public com.google.protobuf.Parser<SnapshotDiffSection> getParserForType() {
16464      return PARSER;
16465    }
16466
16467    public interface CreatedListEntryOrBuilder
16468        extends com.google.protobuf.MessageOrBuilder {
16469
16470      // optional bytes name = 1;
16471      /**
16472       * <code>optional bytes name = 1;</code>
16473       */
16474      boolean hasName();
16475      /**
16476       * <code>optional bytes name = 1;</code>
16477       */
16478      com.google.protobuf.ByteString getName();
16479    }
16480    /**
16481     * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.CreatedListEntry}
16482     */
16483    public static final class CreatedListEntry extends
16484        com.google.protobuf.GeneratedMessage
16485        implements CreatedListEntryOrBuilder {
16486      // Use CreatedListEntry.newBuilder() to construct.
16487      private CreatedListEntry(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
16488        super(builder);
16489        this.unknownFields = builder.getUnknownFields();
16490      }
16491      private CreatedListEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
16492
16493      private static final CreatedListEntry defaultInstance;
16494      public static CreatedListEntry getDefaultInstance() {
16495        return defaultInstance;
16496      }
16497
16498      public CreatedListEntry getDefaultInstanceForType() {
16499        return defaultInstance;
16500      }
16501
16502      private final com.google.protobuf.UnknownFieldSet unknownFields;
16503      @java.lang.Override
16504      public final com.google.protobuf.UnknownFieldSet
16505          getUnknownFields() {
16506        return this.unknownFields;
16507      }
16508      private CreatedListEntry(
16509          com.google.protobuf.CodedInputStream input,
16510          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16511          throws com.google.protobuf.InvalidProtocolBufferException {
16512        initFields();
16513        int mutable_bitField0_ = 0;
16514        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
16515            com.google.protobuf.UnknownFieldSet.newBuilder();
16516        try {
16517          boolean done = false;
16518          while (!done) {
16519            int tag = input.readTag();
16520            switch (tag) {
16521              case 0:
16522                done = true;
16523                break;
16524              default: {
16525                if (!parseUnknownField(input, unknownFields,
16526                                       extensionRegistry, tag)) {
16527                  done = true;
16528                }
16529                break;
16530              }
16531              case 10: {
16532                bitField0_ |= 0x00000001;
16533                name_ = input.readBytes();
16534                break;
16535              }
16536            }
16537          }
16538        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
16539          throw e.setUnfinishedMessage(this);
16540        } catch (java.io.IOException e) {
16541          throw new com.google.protobuf.InvalidProtocolBufferException(
16542              e.getMessage()).setUnfinishedMessage(this);
16543        } finally {
16544          this.unknownFields = unknownFields.build();
16545          makeExtensionsImmutable();
16546        }
16547      }
16548      public static final com.google.protobuf.Descriptors.Descriptor
16549          getDescriptor() {
16550        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_descriptor;
16551      }
16552
16553      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
16554          internalGetFieldAccessorTable() {
16555        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_fieldAccessorTable
16556            .ensureFieldAccessorsInitialized(
16557                org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry.Builder.class);
16558      }
16559
16560      public static com.google.protobuf.Parser<CreatedListEntry> PARSER =
16561          new com.google.protobuf.AbstractParser<CreatedListEntry>() {
16562        public CreatedListEntry parsePartialFrom(
16563            com.google.protobuf.CodedInputStream input,
16564            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16565            throws com.google.protobuf.InvalidProtocolBufferException {
16566          return new CreatedListEntry(input, extensionRegistry);
16567        }
16568      };
16569
16570      @java.lang.Override
16571      public com.google.protobuf.Parser<CreatedListEntry> getParserForType() {
16572        return PARSER;
16573      }
16574
16575      private int bitField0_;
16576      // optional bytes name = 1;
16577      public static final int NAME_FIELD_NUMBER = 1;
16578      private com.google.protobuf.ByteString name_;
16579      /**
16580       * <code>optional bytes name = 1;</code>
16581       */
16582      public boolean hasName() {
16583        return ((bitField0_ & 0x00000001) == 0x00000001);
16584      }
16585      /**
16586       * <code>optional bytes name = 1;</code>
16587       */
16588      public com.google.protobuf.ByteString getName() {
16589        return name_;
16590      }
16591
16592      private void initFields() {
16593        name_ = com.google.protobuf.ByteString.EMPTY;
16594      }
16595      private byte memoizedIsInitialized = -1;
16596      public final boolean isInitialized() {
16597        byte isInitialized = memoizedIsInitialized;
16598        if (isInitialized != -1) return isInitialized == 1;
16599
16600        memoizedIsInitialized = 1;
16601        return true;
16602      }
16603
16604      public void writeTo(com.google.protobuf.CodedOutputStream output)
16605                          throws java.io.IOException {
16606        getSerializedSize();
16607        if (((bitField0_ & 0x00000001) == 0x00000001)) {
16608          output.writeBytes(1, name_);
16609        }
16610        getUnknownFields().writeTo(output);
16611      }
16612
16613      private int memoizedSerializedSize = -1;
16614      public int getSerializedSize() {
16615        int size = memoizedSerializedSize;
16616        if (size != -1) return size;
16617
16618        size = 0;
16619        if (((bitField0_ & 0x00000001) == 0x00000001)) {
16620          size += com.google.protobuf.CodedOutputStream
16621            .computeBytesSize(1, name_);
16622        }
16623        size += getUnknownFields().getSerializedSize();
16624        memoizedSerializedSize = size;
16625        return size;
16626      }
16627
16628      private static final long serialVersionUID = 0L;
16629      @java.lang.Override
16630      protected java.lang.Object writeReplace()
16631          throws java.io.ObjectStreamException {
16632        return super.writeReplace();
16633      }
16634
16635      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom(
16636          com.google.protobuf.ByteString data)
16637          throws com.google.protobuf.InvalidProtocolBufferException {
16638        return PARSER.parseFrom(data);
16639      }
16640      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom(
16641          com.google.protobuf.ByteString data,
16642          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16643          throws com.google.protobuf.InvalidProtocolBufferException {
16644        return PARSER.parseFrom(data, extensionRegistry);
16645      }
16646      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom(byte[] data)
16647          throws com.google.protobuf.InvalidProtocolBufferException {
16648        return PARSER.parseFrom(data);
16649      }
16650      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom(
16651          byte[] data,
16652          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16653          throws com.google.protobuf.InvalidProtocolBufferException {
16654        return PARSER.parseFrom(data, extensionRegistry);
16655      }
16656      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom(java.io.InputStream input)
16657          throws java.io.IOException {
16658        return PARSER.parseFrom(input);
16659      }
16660      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom(
16661          java.io.InputStream input,
16662          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16663          throws java.io.IOException {
16664        return PARSER.parseFrom(input, extensionRegistry);
16665      }
16666      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseDelimitedFrom(java.io.InputStream input)
16667          throws java.io.IOException {
16668        return PARSER.parseDelimitedFrom(input);
16669      }
16670      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseDelimitedFrom(
16671          java.io.InputStream input,
16672          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16673          throws java.io.IOException {
16674        return PARSER.parseDelimitedFrom(input, extensionRegistry);
16675      }
16676      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom(
16677          com.google.protobuf.CodedInputStream input)
16678          throws java.io.IOException {
16679        return PARSER.parseFrom(input);
16680      }
16681      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom(
16682          com.google.protobuf.CodedInputStream input,
16683          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16684          throws java.io.IOException {
16685        return PARSER.parseFrom(input, extensionRegistry);
16686      }
16687
16688      public static Builder newBuilder() { return Builder.create(); }
16689      public Builder newBuilderForType() { return newBuilder(); }
16690      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry prototype) {
16691        return newBuilder().mergeFrom(prototype);
16692      }
16693      public Builder toBuilder() { return newBuilder(this); }
16694
16695      @java.lang.Override
16696      protected Builder newBuilderForType(
16697          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
16698        Builder builder = new Builder(parent);
16699        return builder;
16700      }
16701      /**
16702       * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.CreatedListEntry}
16703       */
16704      public static final class Builder extends
16705          com.google.protobuf.GeneratedMessage.Builder<Builder>
16706         implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntryOrBuilder {
16707        public static final com.google.protobuf.Descriptors.Descriptor
16708            getDescriptor() {
16709          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_descriptor;
16710        }
16711
16712        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
16713            internalGetFieldAccessorTable() {
16714          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_fieldAccessorTable
16715              .ensureFieldAccessorsInitialized(
16716                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry.Builder.class);
16717        }
16718
16719        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry.newBuilder()
16720        private Builder() {
16721          maybeForceBuilderInitialization();
16722        }
16723
16724        private Builder(
16725            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
16726          super(parent);
16727          maybeForceBuilderInitialization();
16728        }
16729        private void maybeForceBuilderInitialization() {
16730          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
16731          }
16732        }
16733        private static Builder create() {
16734          return new Builder();
16735        }
16736
16737        public Builder clear() {
16738          super.clear();
16739          name_ = com.google.protobuf.ByteString.EMPTY;
16740          bitField0_ = (bitField0_ & ~0x00000001);
16741          return this;
16742        }
16743
16744        public Builder clone() {
16745          return create().mergeFrom(buildPartial());
16746        }
16747
16748        public com.google.protobuf.Descriptors.Descriptor
16749            getDescriptorForType() {
16750          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_descriptor;
16751        }
16752
16753        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry getDefaultInstanceForType() {
16754          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry.getDefaultInstance();
16755        }
16756
16757        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry build() {
16758          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry result = buildPartial();
16759          if (!result.isInitialized()) {
16760            throw newUninitializedMessageException(result);
16761          }
16762          return result;
16763        }
16764
16765        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry buildPartial() {
16766          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry(this);
16767          int from_bitField0_ = bitField0_;
16768          int to_bitField0_ = 0;
16769          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
16770            to_bitField0_ |= 0x00000001;
16771          }
16772          result.name_ = name_;
16773          result.bitField0_ = to_bitField0_;
16774          onBuilt();
16775          return result;
16776        }
16777
16778        public Builder mergeFrom(com.google.protobuf.Message other) {
16779          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry) {
16780            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry)other);
16781          } else {
16782            super.mergeFrom(other);
16783            return this;
16784          }
16785        }
16786
16787        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry other) {
16788          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry.getDefaultInstance()) return this;
16789          if (other.hasName()) {
16790            setName(other.getName());
16791          }
16792          this.mergeUnknownFields(other.getUnknownFields());
16793          return this;
16794        }
16795
16796        public final boolean isInitialized() {
16797          return true;
16798        }
16799
16800        public Builder mergeFrom(
16801            com.google.protobuf.CodedInputStream input,
16802            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16803            throws java.io.IOException {
16804          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parsedMessage = null;
16805          try {
16806            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
16807          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
16808            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry) e.getUnfinishedMessage();
16809            throw e;
16810          } finally {
16811            if (parsedMessage != null) {
16812              mergeFrom(parsedMessage);
16813            }
16814          }
16815          return this;
16816        }
16817        private int bitField0_;
16818
16819        // optional bytes name = 1;
16820        private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY;
16821        /**
16822         * <code>optional bytes name = 1;</code>
16823         */
16824        public boolean hasName() {
16825          return ((bitField0_ & 0x00000001) == 0x00000001);
16826        }
16827        /**
16828         * <code>optional bytes name = 1;</code>
16829         */
16830        public com.google.protobuf.ByteString getName() {
16831          return name_;
16832        }
16833        /**
16834         * <code>optional bytes name = 1;</code>
16835         */
        public Builder setName(com.google.protobuf.ByteString value) {
          if (value == null) {
            throw new NullPointerException();
          }
          bitField0_ |= 0x00000001;
16841          name_ = value;
16842          onChanged();
16843          return this;
16844        }
16845        /**
16846         * <code>optional bytes name = 1;</code>
16847         */
16848        public Builder clearName() {
16849          bitField0_ = (bitField0_ & ~0x00000001);
16850          name_ = getDefaultInstance().getName();
16851          onChanged();
16852          return this;
16853        }
16854
16855        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.CreatedListEntry)
16856      }
16857
16858      static {
16859        defaultInstance = new CreatedListEntry(true);
16860        defaultInstance.initFields();
16861      }
16862
16863      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.CreatedListEntry)
16864    }
16865
16866    public interface DirectoryDiffOrBuilder
16867        extends com.google.protobuf.MessageOrBuilder {
16868
16869      // optional uint32 snapshotId = 1;
16870      /**
16871       * <code>optional uint32 snapshotId = 1;</code>
16872       */
16873      boolean hasSnapshotId();
16874      /**
16875       * <code>optional uint32 snapshotId = 1;</code>
16876       */
16877      int getSnapshotId();
16878
16879      // optional uint32 childrenSize = 2;
16880      /**
16881       * <code>optional uint32 childrenSize = 2;</code>
16882       */
16883      boolean hasChildrenSize();
16884      /**
16885       * <code>optional uint32 childrenSize = 2;</code>
16886       */
16887      int getChildrenSize();
16888
16889      // optional bool isSnapshotRoot = 3;
16890      /**
16891       * <code>optional bool isSnapshotRoot = 3;</code>
16892       */
16893      boolean hasIsSnapshotRoot();
16894      /**
16895       * <code>optional bool isSnapshotRoot = 3;</code>
16896       */
16897      boolean getIsSnapshotRoot();
16898
16899      // optional bytes name = 4;
16900      /**
16901       * <code>optional bytes name = 4;</code>
16902       */
16903      boolean hasName();
16904      /**
16905       * <code>optional bytes name = 4;</code>
16906       */
16907      com.google.protobuf.ByteString getName();
16908
16909      // optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;
16910      /**
16911       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
16912       */
16913      boolean hasSnapshotCopy();
16914      /**
16915       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
16916       */
16917      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getSnapshotCopy();
16918      /**
16919       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
16920       */
16921      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder getSnapshotCopyOrBuilder();
16922
16923      // optional uint32 createdListSize = 6;
16924      /**
16925       * <code>optional uint32 createdListSize = 6;</code>
16926       */
16927      boolean hasCreatedListSize();
16928      /**
16929       * <code>optional uint32 createdListSize = 6;</code>
16930       */
16931      int getCreatedListSize();
16932
16933      // repeated uint64 deletedINode = 7 [packed = true];
16934      /**
16935       * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
16936       *
16937       * <pre>
16938       * id of deleted inodes
16939       * </pre>
16940       */
16941      java.util.List<java.lang.Long> getDeletedINodeList();
16942      /**
16943       * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
16944       *
16945       * <pre>
16946       * id of deleted inodes
16947       * </pre>
16948       */
16949      int getDeletedINodeCount();
16950      /**
16951       * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
16952       *
16953       * <pre>
16954       * id of deleted inodes
16955       * </pre>
16956       */
16957      long getDeletedINode(int index);
16958
16959      // repeated uint32 deletedINodeRef = 8 [packed = true];
16960      /**
16961       * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
16962       *
16963       * <pre>
16964       * id of reference nodes in the deleted list
16965       * </pre>
16966       */
16967      java.util.List<java.lang.Integer> getDeletedINodeRefList();
16968      /**
16969       * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
16970       *
16971       * <pre>
16972       * id of reference nodes in the deleted list
16973       * </pre>
16974       */
16975      int getDeletedINodeRefCount();
16976      /**
16977       * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
16978       *
16979       * <pre>
16980       * id of reference nodes in the deleted list
16981       * </pre>
16982       */
16983      int getDeletedINodeRef(int index);
16984    }
16985    /**
16986     * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.DirectoryDiff}
16987     */
16988    public static final class DirectoryDiff extends
16989        com.google.protobuf.GeneratedMessage
16990        implements DirectoryDiffOrBuilder {
16991      // Use DirectoryDiff.newBuilder() to construct.
16992      private DirectoryDiff(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
16993        super(builder);
16994        this.unknownFields = builder.getUnknownFields();
16995      }
16996      private DirectoryDiff(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
16997
16998      private static final DirectoryDiff defaultInstance;
16999      public static DirectoryDiff getDefaultInstance() {
17000        return defaultInstance;
17001      }
17002
17003      public DirectoryDiff getDefaultInstanceForType() {
17004        return defaultInstance;
17005      }
17006
17007      private final com.google.protobuf.UnknownFieldSet unknownFields;
17008      @java.lang.Override
17009      public final com.google.protobuf.UnknownFieldSet
17010          getUnknownFields() {
17011        return this.unknownFields;
17012      }
17013      private DirectoryDiff(
17014          com.google.protobuf.CodedInputStream input,
17015          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17016          throws com.google.protobuf.InvalidProtocolBufferException {
17017        initFields();
17018        int mutable_bitField0_ = 0;
17019        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
17020            com.google.protobuf.UnknownFieldSet.newBuilder();
17021        try {
17022          boolean done = false;
17023          while (!done) {
17024            int tag = input.readTag();
17025            switch (tag) {
17026              case 0:
17027                done = true;
17028                break;
17029              default: {
17030                if (!parseUnknownField(input, unknownFields,
17031                                       extensionRegistry, tag)) {
17032                  done = true;
17033                }
17034                break;
17035              }
17036              case 8: {
17037                bitField0_ |= 0x00000001;
17038                snapshotId_ = input.readUInt32();
17039                break;
17040              }
17041              case 16: {
17042                bitField0_ |= 0x00000002;
17043                childrenSize_ = input.readUInt32();
17044                break;
17045              }
17046              case 24: {
17047                bitField0_ |= 0x00000004;
17048                isSnapshotRoot_ = input.readBool();
17049                break;
17050              }
17051              case 34: {
17052                bitField0_ |= 0x00000008;
17053                name_ = input.readBytes();
17054                break;
17055              }
17056              case 42: {
17057                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder subBuilder = null;
17058                if (((bitField0_ & 0x00000010) == 0x00000010)) {
17059                  subBuilder = snapshotCopy_.toBuilder();
17060                }
17061                snapshotCopy_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.PARSER, extensionRegistry);
17062                if (subBuilder != null) {
17063                  subBuilder.mergeFrom(snapshotCopy_);
17064                  snapshotCopy_ = subBuilder.buildPartial();
17065                }
17066                bitField0_ |= 0x00000010;
17067                break;
17068              }
17069              case 48: {
17070                bitField0_ |= 0x00000020;
17071                createdListSize_ = input.readUInt32();
17072                break;
17073              }
17074              case 56: {
17075                if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
17076                  deletedINode_ = new java.util.ArrayList<java.lang.Long>();
17077                  mutable_bitField0_ |= 0x00000040;
17078                }
17079                deletedINode_.add(input.readUInt64());
17080                break;
17081              }
17082              case 58: {
17083                int length = input.readRawVarint32();
17084                int limit = input.pushLimit(length);
17085                if (!((mutable_bitField0_ & 0x00000040) == 0x00000040) && input.getBytesUntilLimit() > 0) {
17086                  deletedINode_ = new java.util.ArrayList<java.lang.Long>();
17087                  mutable_bitField0_ |= 0x00000040;
17088                }
17089                while (input.getBytesUntilLimit() > 0) {
17090                  deletedINode_.add(input.readUInt64());
17091                }
17092                input.popLimit(limit);
17093                break;
17094              }
17095              case 64: {
17096                if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) {
17097                  deletedINodeRef_ = new java.util.ArrayList<java.lang.Integer>();
17098                  mutable_bitField0_ |= 0x00000080;
17099                }
17100                deletedINodeRef_.add(input.readUInt32());
17101                break;
17102              }
17103              case 66: {
17104                int length = input.readRawVarint32();
17105                int limit = input.pushLimit(length);
17106                if (!((mutable_bitField0_ & 0x00000080) == 0x00000080) && input.getBytesUntilLimit() > 0) {
17107                  deletedINodeRef_ = new java.util.ArrayList<java.lang.Integer>();
17108                  mutable_bitField0_ |= 0x00000080;
17109                }
17110                while (input.getBytesUntilLimit() > 0) {
17111                  deletedINodeRef_.add(input.readUInt32());
17112                }
17113                input.popLimit(limit);
17114                break;
17115              }
17116            }
17117          }
17118        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
17119          throw e.setUnfinishedMessage(this);
17120        } catch (java.io.IOException e) {
17121          throw new com.google.protobuf.InvalidProtocolBufferException(
17122              e.getMessage()).setUnfinishedMessage(this);
17123        } finally {
17124          if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
17125            deletedINode_ = java.util.Collections.unmodifiableList(deletedINode_);
17126          }
17127          if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) {
17128            deletedINodeRef_ = java.util.Collections.unmodifiableList(deletedINodeRef_);
17129          }
17130          this.unknownFields = unknownFields.build();
17131          makeExtensionsImmutable();
17132        }
17133      }
17134      public static final com.google.protobuf.Descriptors.Descriptor
17135          getDescriptor() {
17136        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_descriptor;
17137      }
17138
17139      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
17140          internalGetFieldAccessorTable() {
17141        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_fieldAccessorTable
17142            .ensureFieldAccessorsInitialized(
17143                org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff.Builder.class);
17144      }
17145
17146      public static com.google.protobuf.Parser<DirectoryDiff> PARSER =
17147          new com.google.protobuf.AbstractParser<DirectoryDiff>() {
17148        public DirectoryDiff parsePartialFrom(
17149            com.google.protobuf.CodedInputStream input,
17150            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17151            throws com.google.protobuf.InvalidProtocolBufferException {
17152          return new DirectoryDiff(input, extensionRegistry);
17153        }
17154      };
17155
17156      @java.lang.Override
17157      public com.google.protobuf.Parser<DirectoryDiff> getParserForType() {
17158        return PARSER;
17159      }
17160
17161      private int bitField0_;
17162      // optional uint32 snapshotId = 1;
17163      public static final int SNAPSHOTID_FIELD_NUMBER = 1;
17164      private int snapshotId_;
17165      /**
17166       * <code>optional uint32 snapshotId = 1;</code>
17167       */
17168      public boolean hasSnapshotId() {
17169        return ((bitField0_ & 0x00000001) == 0x00000001);
17170      }
17171      /**
17172       * <code>optional uint32 snapshotId = 1;</code>
17173       */
17174      public int getSnapshotId() {
17175        return snapshotId_;
17176      }
17177
17178      // optional uint32 childrenSize = 2;
17179      public static final int CHILDRENSIZE_FIELD_NUMBER = 2;
17180      private int childrenSize_;
17181      /**
17182       * <code>optional uint32 childrenSize = 2;</code>
17183       */
17184      public boolean hasChildrenSize() {
17185        return ((bitField0_ & 0x00000002) == 0x00000002);
17186      }
17187      /**
17188       * <code>optional uint32 childrenSize = 2;</code>
17189       */
17190      public int getChildrenSize() {
17191        return childrenSize_;
17192      }
17193
17194      // optional bool isSnapshotRoot = 3;
17195      public static final int ISSNAPSHOTROOT_FIELD_NUMBER = 3;
17196      private boolean isSnapshotRoot_;
17197      /**
17198       * <code>optional bool isSnapshotRoot = 3;</code>
17199       */
17200      public boolean hasIsSnapshotRoot() {
17201        return ((bitField0_ & 0x00000004) == 0x00000004);
17202      }
17203      /**
17204       * <code>optional bool isSnapshotRoot = 3;</code>
17205       */
17206      public boolean getIsSnapshotRoot() {
17207        return isSnapshotRoot_;
17208      }
17209
17210      // optional bytes name = 4;
17211      public static final int NAME_FIELD_NUMBER = 4;
17212      private com.google.protobuf.ByteString name_;
17213      /**
17214       * <code>optional bytes name = 4;</code>
17215       */
17216      public boolean hasName() {
17217        return ((bitField0_ & 0x00000008) == 0x00000008);
17218      }
17219      /**
17220       * <code>optional bytes name = 4;</code>
17221       */
17222      public com.google.protobuf.ByteString getName() {
17223        return name_;
17224      }
17225
17226      // optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;
17227      public static final int SNAPSHOTCOPY_FIELD_NUMBER = 5;
17228      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory snapshotCopy_;
17229      /**
17230       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
17231       */
17232      public boolean hasSnapshotCopy() {
17233        return ((bitField0_ & 0x00000010) == 0x00000010);
17234      }
17235      /**
17236       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
17237       */
17238      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getSnapshotCopy() {
17239        return snapshotCopy_;
17240      }
17241      /**
17242       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
17243       */
17244      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder getSnapshotCopyOrBuilder() {
17245        return snapshotCopy_;
17246      }
17247
17248      // optional uint32 createdListSize = 6;
17249      public static final int CREATEDLISTSIZE_FIELD_NUMBER = 6;
17250      private int createdListSize_;
17251      /**
17252       * <code>optional uint32 createdListSize = 6;</code>
17253       */
17254      public boolean hasCreatedListSize() {
17255        return ((bitField0_ & 0x00000020) == 0x00000020);
17256      }
17257      /**
17258       * <code>optional uint32 createdListSize = 6;</code>
17259       */
17260      public int getCreatedListSize() {
17261        return createdListSize_;
17262      }
17263
17264      // repeated uint64 deletedINode = 7 [packed = true];
17265      public static final int DELETEDINODE_FIELD_NUMBER = 7;
17266      private java.util.List<java.lang.Long> deletedINode_;
17267      /**
17268       * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
17269       *
17270       * <pre>
17271       * id of deleted inodes
17272       * </pre>
17273       */
17274      public java.util.List<java.lang.Long>
17275          getDeletedINodeList() {
17276        return deletedINode_;
17277      }
17278      /**
17279       * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
17280       *
17281       * <pre>
17282       * id of deleted inodes
17283       * </pre>
17284       */
17285      public int getDeletedINodeCount() {
17286        return deletedINode_.size();
17287      }
17288      /**
17289       * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
17290       *
17291       * <pre>
17292       * id of deleted inodes
17293       * </pre>
17294       */
17295      public long getDeletedINode(int index) {
17296        return deletedINode_.get(index);
17297      }
17298      private int deletedINodeMemoizedSerializedSize = -1;
17299
17300      // repeated uint32 deletedINodeRef = 8 [packed = true];
17301      public static final int DELETEDINODEREF_FIELD_NUMBER = 8;
17302      private java.util.List<java.lang.Integer> deletedINodeRef_;
17303      /**
17304       * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
17305       *
17306       * <pre>
17307       * id of reference nodes in the deleted list
17308       * </pre>
17309       */
17310      public java.util.List<java.lang.Integer>
17311          getDeletedINodeRefList() {
17312        return deletedINodeRef_;
17313      }
17314      /**
17315       * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
17316       *
17317       * <pre>
17318       * id of reference nodes in the deleted list
17319       * </pre>
17320       */
17321      public int getDeletedINodeRefCount() {
17322        return deletedINodeRef_.size();
17323      }
17324      /**
17325       * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
17326       *
17327       * <pre>
17328       * id of reference nodes in the deleted list
17329       * </pre>
17330       */
17331      public int getDeletedINodeRef(int index) {
17332        return deletedINodeRef_.get(index);
17333      }
17334      private int deletedINodeRefMemoizedSerializedSize = -1;
17335
17336      private void initFields() {
17337        snapshotId_ = 0;
17338        childrenSize_ = 0;
17339        isSnapshotRoot_ = false;
17340        name_ = com.google.protobuf.ByteString.EMPTY;
17341        snapshotCopy_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance();
17342        createdListSize_ = 0;
17343        deletedINode_ = java.util.Collections.emptyList();
17344        deletedINodeRef_ = java.util.Collections.emptyList();
17345      }
17346      private byte memoizedIsInitialized = -1;
17347      public final boolean isInitialized() {
17348        byte isInitialized = memoizedIsInitialized;
17349        if (isInitialized != -1) return isInitialized == 1;
17350
17351        if (hasSnapshotCopy()) {
17352          if (!getSnapshotCopy().isInitialized()) {
17353            memoizedIsInitialized = 0;
17354            return false;
17355          }
17356        }
17357        memoizedIsInitialized = 1;
17358        return true;
17359      }
17360
17361      public void writeTo(com.google.protobuf.CodedOutputStream output)
17362                          throws java.io.IOException {
17363        getSerializedSize();
17364        if (((bitField0_ & 0x00000001) == 0x00000001)) {
17365          output.writeUInt32(1, snapshotId_);
17366        }
17367        if (((bitField0_ & 0x00000002) == 0x00000002)) {
17368          output.writeUInt32(2, childrenSize_);
17369        }
17370        if (((bitField0_ & 0x00000004) == 0x00000004)) {
17371          output.writeBool(3, isSnapshotRoot_);
17372        }
17373        if (((bitField0_ & 0x00000008) == 0x00000008)) {
17374          output.writeBytes(4, name_);
17375        }
17376        if (((bitField0_ & 0x00000010) == 0x00000010)) {
17377          output.writeMessage(5, snapshotCopy_);
17378        }
17379        if (((bitField0_ & 0x00000020) == 0x00000020)) {
17380          output.writeUInt32(6, createdListSize_);
17381        }
17382        if (getDeletedINodeList().size() > 0) {
17383          output.writeRawVarint32(58);
17384          output.writeRawVarint32(deletedINodeMemoizedSerializedSize);
17385        }
17386        for (int i = 0; i < deletedINode_.size(); i++) {
17387          output.writeUInt64NoTag(deletedINode_.get(i));
17388        }
17389        if (getDeletedINodeRefList().size() > 0) {
17390          output.writeRawVarint32(66);
17391          output.writeRawVarint32(deletedINodeRefMemoizedSerializedSize);
17392        }
17393        for (int i = 0; i < deletedINodeRef_.size(); i++) {
17394          output.writeUInt32NoTag(deletedINodeRef_.get(i));
17395        }
17396        getUnknownFields().writeTo(output);
17397      }
17398
17399      private int memoizedSerializedSize = -1;
17400      public int getSerializedSize() {
17401        int size = memoizedSerializedSize;
17402        if (size != -1) return size;
17403
17404        size = 0;
17405        if (((bitField0_ & 0x00000001) == 0x00000001)) {
17406          size += com.google.protobuf.CodedOutputStream
17407            .computeUInt32Size(1, snapshotId_);
17408        }
17409        if (((bitField0_ & 0x00000002) == 0x00000002)) {
17410          size += com.google.protobuf.CodedOutputStream
17411            .computeUInt32Size(2, childrenSize_);
17412        }
17413        if (((bitField0_ & 0x00000004) == 0x00000004)) {
17414          size += com.google.protobuf.CodedOutputStream
17415            .computeBoolSize(3, isSnapshotRoot_);
17416        }
17417        if (((bitField0_ & 0x00000008) == 0x00000008)) {
17418          size += com.google.protobuf.CodedOutputStream
17419            .computeBytesSize(4, name_);
17420        }
17421        if (((bitField0_ & 0x00000010) == 0x00000010)) {
17422          size += com.google.protobuf.CodedOutputStream
17423            .computeMessageSize(5, snapshotCopy_);
17424        }
17425        if (((bitField0_ & 0x00000020) == 0x00000020)) {
17426          size += com.google.protobuf.CodedOutputStream
17427            .computeUInt32Size(6, createdListSize_);
17428        }
17429        {
17430          int dataSize = 0;
17431          for (int i = 0; i < deletedINode_.size(); i++) {
17432            dataSize += com.google.protobuf.CodedOutputStream
17433              .computeUInt64SizeNoTag(deletedINode_.get(i));
17434          }
17435          size += dataSize;
17436          if (!getDeletedINodeList().isEmpty()) {
17437            size += 1;
17438            size += com.google.protobuf.CodedOutputStream
17439                .computeInt32SizeNoTag(dataSize);
17440          }
17441          deletedINodeMemoizedSerializedSize = dataSize;
17442        }
17443        {
17444          int dataSize = 0;
17445          for (int i = 0; i < deletedINodeRef_.size(); i++) {
17446            dataSize += com.google.protobuf.CodedOutputStream
17447              .computeUInt32SizeNoTag(deletedINodeRef_.get(i));
17448          }
17449          size += dataSize;
17450          if (!getDeletedINodeRefList().isEmpty()) {
17451            size += 1;
17452            size += com.google.protobuf.CodedOutputStream
17453                .computeInt32SizeNoTag(dataSize);
17454          }
17455          deletedINodeRefMemoizedSerializedSize = dataSize;
17456        }
17457        size += getUnknownFields().getSerializedSize();
17458        memoizedSerializedSize = size;
17459        return size;
17460      }
17461
17462      private static final long serialVersionUID = 0L;
17463      @java.lang.Override
17464      protected java.lang.Object writeReplace()
17465          throws java.io.ObjectStreamException {
17466        return super.writeReplace();
17467      }
17468
17469      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom(
17470          com.google.protobuf.ByteString data)
17471          throws com.google.protobuf.InvalidProtocolBufferException {
17472        return PARSER.parseFrom(data);
17473      }
17474      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom(
17475          com.google.protobuf.ByteString data,
17476          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17477          throws com.google.protobuf.InvalidProtocolBufferException {
17478        return PARSER.parseFrom(data, extensionRegistry);
17479      }
17480      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom(byte[] data)
17481          throws com.google.protobuf.InvalidProtocolBufferException {
17482        return PARSER.parseFrom(data);
17483      }
17484      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom(
17485          byte[] data,
17486          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17487          throws com.google.protobuf.InvalidProtocolBufferException {
17488        return PARSER.parseFrom(data, extensionRegistry);
17489      }
17490      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom(java.io.InputStream input)
17491          throws java.io.IOException {
17492        return PARSER.parseFrom(input);
17493      }
17494      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom(
17495          java.io.InputStream input,
17496          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17497          throws java.io.IOException {
17498        return PARSER.parseFrom(input, extensionRegistry);
17499      }
17500      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseDelimitedFrom(java.io.InputStream input)
17501          throws java.io.IOException {
17502        return PARSER.parseDelimitedFrom(input);
17503      }
17504      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseDelimitedFrom(
17505          java.io.InputStream input,
17506          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17507          throws java.io.IOException {
17508        return PARSER.parseDelimitedFrom(input, extensionRegistry);
17509      }
17510      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom(
17511          com.google.protobuf.CodedInputStream input)
17512          throws java.io.IOException {
17513        return PARSER.parseFrom(input);
17514      }
17515      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom(
17516          com.google.protobuf.CodedInputStream input,
17517          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17518          throws java.io.IOException {
17519        return PARSER.parseFrom(input, extensionRegistry);
17520      }
17521
17522      public static Builder newBuilder() { return Builder.create(); }
17523      public Builder newBuilderForType() { return newBuilder(); }
17524      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff prototype) {
17525        return newBuilder().mergeFrom(prototype);
17526      }
17527      public Builder toBuilder() { return newBuilder(this); }
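      // Typical round-trip through the builder (illustrative only; the
      // field values are made up):
      //
      //   DirectoryDiff diff = DirectoryDiff.newBuilder()
      //       .setSnapshotId(3)
      //       .setIsSnapshotRoot(true)
      //       .addDeletedINode(16386L)
      //       .build();
      //   DirectoryDiff edited = diff.toBuilder().setChildrenSize(4).build();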
17528
17529      @java.lang.Override
17530      protected Builder newBuilderForType(
17531          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
17532        Builder builder = new Builder(parent);
17533        return builder;
17534      }
17535      /**
17536       * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.DirectoryDiff}
17537       */
17538      public static final class Builder extends
17539          com.google.protobuf.GeneratedMessage.Builder<Builder>
17540         implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiffOrBuilder {
17541        public static final com.google.protobuf.Descriptors.Descriptor
17542            getDescriptor() {
17543          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_descriptor;
17544        }
17545
17546        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
17547            internalGetFieldAccessorTable() {
17548          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_fieldAccessorTable
17549              .ensureFieldAccessorsInitialized(
17550                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff.Builder.class);
17551        }
17552
17553        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff.newBuilder()
17554        private Builder() {
17555          maybeForceBuilderInitialization();
17556        }
17557
17558        private Builder(
17559            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
17560          super(parent);
17561          maybeForceBuilderInitialization();
17562        }
17563        private void maybeForceBuilderInitialization() {
17564          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
17565            getSnapshotCopyFieldBuilder();
17566          }
17567        }
17568        private static Builder create() {
17569          return new Builder();
17570        }
17571
17572        public Builder clear() {
17573          super.clear();
17574          snapshotId_ = 0;
17575          bitField0_ = (bitField0_ & ~0x00000001);
17576          childrenSize_ = 0;
17577          bitField0_ = (bitField0_ & ~0x00000002);
17578          isSnapshotRoot_ = false;
17579          bitField0_ = (bitField0_ & ~0x00000004);
17580          name_ = com.google.protobuf.ByteString.EMPTY;
17581          bitField0_ = (bitField0_ & ~0x00000008);
17582          if (snapshotCopyBuilder_ == null) {
17583            snapshotCopy_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance();
17584          } else {
17585            snapshotCopyBuilder_.clear();
17586          }
17587          bitField0_ = (bitField0_ & ~0x00000010);
17588          createdListSize_ = 0;
17589          bitField0_ = (bitField0_ & ~0x00000020);
17590          deletedINode_ = java.util.Collections.emptyList();
17591          bitField0_ = (bitField0_ & ~0x00000040);
17592          deletedINodeRef_ = java.util.Collections.emptyList();
17593          bitField0_ = (bitField0_ & ~0x00000080);
17594          return this;
17595        }
17596
17597        public Builder clone() {
17598          return create().mergeFrom(buildPartial());
17599        }
17600
17601        public com.google.protobuf.Descriptors.Descriptor
17602            getDescriptorForType() {
17603          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_descriptor;
17604        }
17605
17606        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff getDefaultInstanceForType() {
17607          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff.getDefaultInstance();
17608        }
17609
17610        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff build() {
17611          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff result = buildPartial();
17612          if (!result.isInitialized()) {
17613            throw newUninitializedMessageException(result);
17614          }
17615          return result;
17616        }
17617
17618        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff buildPartial() {
17619          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff(this);
17620          int from_bitField0_ = bitField0_;
17621          int to_bitField0_ = 0;
17622          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
17623            to_bitField0_ |= 0x00000001;
17624          }
17625          result.snapshotId_ = snapshotId_;
17626          if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
17627            to_bitField0_ |= 0x00000002;
17628          }
17629          result.childrenSize_ = childrenSize_;
17630          if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
17631            to_bitField0_ |= 0x00000004;
17632          }
17633          result.isSnapshotRoot_ = isSnapshotRoot_;
17634          if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
17635            to_bitField0_ |= 0x00000008;
17636          }
17637          result.name_ = name_;
17638          if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
17639            to_bitField0_ |= 0x00000010;
17640          }
17641          if (snapshotCopyBuilder_ == null) {
17642            result.snapshotCopy_ = snapshotCopy_;
17643          } else {
17644            result.snapshotCopy_ = snapshotCopyBuilder_.build();
17645          }
17646          if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
17647            to_bitField0_ |= 0x00000020;
17648          }
17649          result.createdListSize_ = createdListSize_;
17650          if (((bitField0_ & 0x00000040) == 0x00000040)) {
17651            deletedINode_ = java.util.Collections.unmodifiableList(deletedINode_);
17652            bitField0_ = (bitField0_ & ~0x00000040);
17653          }
17654          result.deletedINode_ = deletedINode_;
17655          if (((bitField0_ & 0x00000080) == 0x00000080)) {
17656            deletedINodeRef_ = java.util.Collections.unmodifiableList(deletedINodeRef_);
17657            bitField0_ = (bitField0_ & ~0x00000080);
17658          }
17659          result.deletedINodeRef_ = deletedINodeRef_;
17660          result.bitField0_ = to_bitField0_;
17661          onBuilt();
17662          return result;
17663        }
17664
17665        public Builder mergeFrom(com.google.protobuf.Message other) {
17666          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff) {
17667            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff)other);
17668          } else {
17669            super.mergeFrom(other);
17670            return this;
17671          }
17672        }
17673
17674        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff other) {
17675          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff.getDefaultInstance()) return this;
17676          if (other.hasSnapshotId()) {
17677            setSnapshotId(other.getSnapshotId());
17678          }
17679          if (other.hasChildrenSize()) {
17680            setChildrenSize(other.getChildrenSize());
17681          }
17682          if (other.hasIsSnapshotRoot()) {
17683            setIsSnapshotRoot(other.getIsSnapshotRoot());
17684          }
17685          if (other.hasName()) {
17686            setName(other.getName());
17687          }
17688          if (other.hasSnapshotCopy()) {
17689            mergeSnapshotCopy(other.getSnapshotCopy());
17690          }
17691          if (other.hasCreatedListSize()) {
17692            setCreatedListSize(other.getCreatedListSize());
17693          }
17694          if (!other.deletedINode_.isEmpty()) {
17695            if (deletedINode_.isEmpty()) {
17696              deletedINode_ = other.deletedINode_;
17697              bitField0_ = (bitField0_ & ~0x00000040);
17698            } else {
17699              ensureDeletedINodeIsMutable();
17700              deletedINode_.addAll(other.deletedINode_);
17701            }
17702            onChanged();
17703          }
17704          if (!other.deletedINodeRef_.isEmpty()) {
17705            if (deletedINodeRef_.isEmpty()) {
17706              deletedINodeRef_ = other.deletedINodeRef_;
17707              bitField0_ = (bitField0_ & ~0x00000080);
17708            } else {
17709              ensureDeletedINodeRefIsMutable();
17710              deletedINodeRef_.addAll(other.deletedINodeRef_);
17711            }
17712            onChanged();
17713          }
17714          this.mergeUnknownFields(other.getUnknownFields());
17715          return this;
17716        }
17717
17718        public final boolean isInitialized() {
17719          if (hasSnapshotCopy()) {
            if (!getSnapshotCopy().isInitialized()) {
              return false;
17723            }
17724          }
17725          return true;
17726        }
17727
17728        public Builder mergeFrom(
17729            com.google.protobuf.CodedInputStream input,
17730            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17731            throws java.io.IOException {
17732          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parsedMessage = null;
17733          try {
17734            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
17735          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
17736            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff) e.getUnfinishedMessage();
17737            throw e;
17738          } finally {
17739            if (parsedMessage != null) {
17740              mergeFrom(parsedMessage);
17741            }
17742          }
17743          return this;
17744        }
17745        private int bitField0_;
17746
17747        // optional uint32 snapshotId = 1;
17748        private int snapshotId_ ;
17749        /**
17750         * <code>optional uint32 snapshotId = 1;</code>
17751         */
17752        public boolean hasSnapshotId() {
17753          return ((bitField0_ & 0x00000001) == 0x00000001);
17754        }
17755        /**
17756         * <code>optional uint32 snapshotId = 1;</code>
17757         */
17758        public int getSnapshotId() {
17759          return snapshotId_;
17760        }
17761        /**
17762         * <code>optional uint32 snapshotId = 1;</code>
17763         */
17764        public Builder setSnapshotId(int value) {
17765          bitField0_ |= 0x00000001;
17766          snapshotId_ = value;
17767          onChanged();
17768          return this;
17769        }
17770        /**
17771         * <code>optional uint32 snapshotId = 1;</code>
17772         */
17773        public Builder clearSnapshotId() {
17774          bitField0_ = (bitField0_ & ~0x00000001);
17775          snapshotId_ = 0;
17776          onChanged();
17777          return this;
17778        }
17779
17780        // optional uint32 childrenSize = 2;
17781        private int childrenSize_ ;
17782        /**
17783         * <code>optional uint32 childrenSize = 2;</code>
17784         */
17785        public boolean hasChildrenSize() {
17786          return ((bitField0_ & 0x00000002) == 0x00000002);
17787        }
17788        /**
17789         * <code>optional uint32 childrenSize = 2;</code>
17790         */
17791        public int getChildrenSize() {
17792          return childrenSize_;
17793        }
17794        /**
17795         * <code>optional uint32 childrenSize = 2;</code>
17796         */
17797        public Builder setChildrenSize(int value) {
17798          bitField0_ |= 0x00000002;
17799          childrenSize_ = value;
17800          onChanged();
17801          return this;
17802        }
17803        /**
17804         * <code>optional uint32 childrenSize = 2;</code>
17805         */
17806        public Builder clearChildrenSize() {
17807          bitField0_ = (bitField0_ & ~0x00000002);
17808          childrenSize_ = 0;
17809          onChanged();
17810          return this;
17811        }
17812
17813        // optional bool isSnapshotRoot = 3;
17814        private boolean isSnapshotRoot_ ;
17815        /**
17816         * <code>optional bool isSnapshotRoot = 3;</code>
17817         */
17818        public boolean hasIsSnapshotRoot() {
17819          return ((bitField0_ & 0x00000004) == 0x00000004);
17820        }
17821        /**
17822         * <code>optional bool isSnapshotRoot = 3;</code>
17823         */
17824        public boolean getIsSnapshotRoot() {
17825          return isSnapshotRoot_;
17826        }
17827        /**
17828         * <code>optional bool isSnapshotRoot = 3;</code>
17829         */
17830        public Builder setIsSnapshotRoot(boolean value) {
17831          bitField0_ |= 0x00000004;
17832          isSnapshotRoot_ = value;
17833          onChanged();
17834          return this;
17835        }
17836        /**
17837         * <code>optional bool isSnapshotRoot = 3;</code>
17838         */
17839        public Builder clearIsSnapshotRoot() {
17840          bitField0_ = (bitField0_ & ~0x00000004);
17841          isSnapshotRoot_ = false;
17842          onChanged();
17843          return this;
17844        }
17845
17846        // optional bytes name = 4;
17847        private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY;
17848        /**
17849         * <code>optional bytes name = 4;</code>
17850         */
17851        public boolean hasName() {
17852          return ((bitField0_ & 0x00000008) == 0x00000008);
17853        }
17854        /**
17855         * <code>optional bytes name = 4;</code>
17856         */
17857        public com.google.protobuf.ByteString getName() {
17858          return name_;
17859        }
17860        /**
17861         * <code>optional bytes name = 4;</code>
17862         */
17863        public Builder setName(com.google.protobuf.ByteString value) {
          if (value == null) {
            throw new NullPointerException();
          }
          bitField0_ |= 0x00000008;
17868          name_ = value;
17869          onChanged();
17870          return this;
17871        }
17872        /**
17873         * <code>optional bytes name = 4;</code>
17874         */
17875        public Builder clearName() {
17876          bitField0_ = (bitField0_ & ~0x00000008);
17877          name_ = getDefaultInstance().getName();
17878          onChanged();
17879          return this;
17880        }
17881
17882        // optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;
17883        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory snapshotCopy_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance();
17884        private com.google.protobuf.SingleFieldBuilder<
17885            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder> snapshotCopyBuilder_;
17886        /**
17887         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
17888         */
17889        public boolean hasSnapshotCopy() {
17890          return ((bitField0_ & 0x00000010) == 0x00000010);
17891        }
17892        /**
17893         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
17894         */
17895        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getSnapshotCopy() {
17896          if (snapshotCopyBuilder_ == null) {
17897            return snapshotCopy_;
17898          } else {
17899            return snapshotCopyBuilder_.getMessage();
17900          }
17901        }
17902        /**
17903         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
17904         */
17905        public Builder setSnapshotCopy(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory value) {
17906          if (snapshotCopyBuilder_ == null) {
17907            if (value == null) {
17908              throw new NullPointerException();
17909            }
17910            snapshotCopy_ = value;
17911            onChanged();
17912          } else {
17913            snapshotCopyBuilder_.setMessage(value);
17914          }
17915          bitField0_ |= 0x00000010;
17916          return this;
17917        }
17918        /**
17919         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
17920         */
17921        public Builder setSnapshotCopy(
17922            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder builderForValue) {
17923          if (snapshotCopyBuilder_ == null) {
17924            snapshotCopy_ = builderForValue.build();
17925            onChanged();
17926          } else {
17927            snapshotCopyBuilder_.setMessage(builderForValue.build());
17928          }
17929          bitField0_ |= 0x00000010;
17930          return this;
17931        }
17932        /**
17933         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
17934         */
17935        public Builder mergeSnapshotCopy(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory value) {
17936          if (snapshotCopyBuilder_ == null) {
17937            if (((bitField0_ & 0x00000010) == 0x00000010) &&
17938                snapshotCopy_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance()) {
17939              snapshotCopy_ =
17940                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.newBuilder(snapshotCopy_).mergeFrom(value).buildPartial();
17941            } else {
17942              snapshotCopy_ = value;
17943            }
17944            onChanged();
17945          } else {
17946            snapshotCopyBuilder_.mergeFrom(value);
17947          }
17948          bitField0_ |= 0x00000010;
17949          return this;
17950        }
17951        /**
17952         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
17953         */
17954        public Builder clearSnapshotCopy() {
17955          if (snapshotCopyBuilder_ == null) {
17956            snapshotCopy_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance();
17957            onChanged();
17958          } else {
17959            snapshotCopyBuilder_.clear();
17960          }
17961          bitField0_ = (bitField0_ & ~0x00000010);
17962          return this;
17963        }
17964        /**
17965         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
17966         */
17967        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder getSnapshotCopyBuilder() {
17968          bitField0_ |= 0x00000010;
17969          onChanged();
17970          return getSnapshotCopyFieldBuilder().getBuilder();
17971        }
17972        /**
17973         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
17974         */
17975        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder getSnapshotCopyOrBuilder() {
17976          if (snapshotCopyBuilder_ != null) {
17977            return snapshotCopyBuilder_.getMessageOrBuilder();
17978          } else {
17979            return snapshotCopy_;
17980          }
17981        }
17982        /**
17983         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
17984         */
17985        private com.google.protobuf.SingleFieldBuilder<
17986            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder> 
17987            getSnapshotCopyFieldBuilder() {
17988          if (snapshotCopyBuilder_ == null) {
17989            snapshotCopyBuilder_ = new com.google.protobuf.SingleFieldBuilder<
17990                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder>(
17991                    snapshotCopy_,
17992                    getParentForChildren(),
17993                    isClean());
17994            snapshotCopy_ = null;
17995          }
17996          return snapshotCopyBuilder_;
17997        }
17998
17999        // optional uint32 createdListSize = 6;
18000        private int createdListSize_ ;
18001        /**
18002         * <code>optional uint32 createdListSize = 6;</code>
18003         */
18004        public boolean hasCreatedListSize() {
18005          return ((bitField0_ & 0x00000020) == 0x00000020);
18006        }
18007        /**
18008         * <code>optional uint32 createdListSize = 6;</code>
18009         */
18010        public int getCreatedListSize() {
18011          return createdListSize_;
18012        }
18013        /**
18014         * <code>optional uint32 createdListSize = 6;</code>
18015         */
18016        public Builder setCreatedListSize(int value) {
18017          bitField0_ |= 0x00000020;
18018          createdListSize_ = value;
18019          onChanged();
18020          return this;
18021        }
18022        /**
18023         * <code>optional uint32 createdListSize = 6;</code>
18024         */
18025        public Builder clearCreatedListSize() {
18026          bitField0_ = (bitField0_ & ~0x00000020);
18027          createdListSize_ = 0;
18028          onChanged();
18029          return this;
18030        }
18031
18032        // repeated uint64 deletedINode = 7 [packed = true];
18033        private java.util.List<java.lang.Long> deletedINode_ = java.util.Collections.emptyList();
18034        private void ensureDeletedINodeIsMutable() {
18035          if (!((bitField0_ & 0x00000040) == 0x00000040)) {
18036            deletedINode_ = new java.util.ArrayList<java.lang.Long>(deletedINode_);
18037            bitField0_ |= 0x00000040;
18038           }
18039        }
18040        /**
18041         * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
18042         *
18043         * <pre>
18044         * id of deleted inodes
18045         * </pre>
18046         */
18047        public java.util.List<java.lang.Long>
18048            getDeletedINodeList() {
18049          return java.util.Collections.unmodifiableList(deletedINode_);
18050        }
18051        /**
18052         * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
18053         *
18054         * <pre>
18055         * id of deleted inodes
18056         * </pre>
18057         */
18058        public int getDeletedINodeCount() {
18059          return deletedINode_.size();
18060        }
18061        /**
18062         * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
18063         *
18064         * <pre>
18065         * id of deleted inodes
18066         * </pre>
18067         */
18068        public long getDeletedINode(int index) {
18069          return deletedINode_.get(index);
18070        }
18071        /**
18072         * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
18073         *
18074         * <pre>
18075         * id of deleted inodes
18076         * </pre>
18077         */
18078        public Builder setDeletedINode(
18079            int index, long value) {
18080          ensureDeletedINodeIsMutable();
18081          deletedINode_.set(index, value);
18082          onChanged();
18083          return this;
18084        }
18085        /**
18086         * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
18087         *
18088         * <pre>
18089         * id of deleted inodes
18090         * </pre>
18091         */
18092        public Builder addDeletedINode(long value) {
18093          ensureDeletedINodeIsMutable();
18094          deletedINode_.add(value);
18095          onChanged();
18096          return this;
18097        }
18098        /**
18099         * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
18100         *
18101         * <pre>
18102         * id of deleted inodes
18103         * </pre>
18104         */
18105        public Builder addAllDeletedINode(
18106            java.lang.Iterable<? extends java.lang.Long> values) {
18107          ensureDeletedINodeIsMutable();
18108          super.addAll(values, deletedINode_);
18109          onChanged();
18110          return this;
18111        }
18112        /**
18113         * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
18114         *
18115         * <pre>
18116         * id of deleted inodes
18117         * </pre>
18118         */
18119        public Builder clearDeletedINode() {
18120          deletedINode_ = java.util.Collections.emptyList();
18121          bitField0_ = (bitField0_ & ~0x00000040);
18122          onChanged();
18123          return this;
18124        }
18125
18126        // repeated uint32 deletedINodeRef = 8 [packed = true];
18127        private java.util.List<java.lang.Integer> deletedINodeRef_ = java.util.Collections.emptyList();
18128        private void ensureDeletedINodeRefIsMutable() {
18129          if (!((bitField0_ & 0x00000080) == 0x00000080)) {
18130            deletedINodeRef_ = new java.util.ArrayList<java.lang.Integer>(deletedINodeRef_);
18131            bitField0_ |= 0x00000080;
18132           }
18133        }
18134        /**
18135         * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
18136         *
18137         * <pre>
18138         * id of reference nodes in the deleted list
18139         * </pre>
18140         */
18141        public java.util.List<java.lang.Integer>
18142            getDeletedINodeRefList() {
18143          return java.util.Collections.unmodifiableList(deletedINodeRef_);
18144        }
18145        /**
18146         * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
18147         *
18148         * <pre>
18149         * id of reference nodes in the deleted list
18150         * </pre>
18151         */
18152        public int getDeletedINodeRefCount() {
18153          return deletedINodeRef_.size();
18154        }
18155        /**
18156         * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
18157         *
18158         * <pre>
18159         * id of reference nodes in the deleted list
18160         * </pre>
18161         */
18162        public int getDeletedINodeRef(int index) {
18163          return deletedINodeRef_.get(index);
18164        }
18165        /**
18166         * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
18167         *
18168         * <pre>
18169         * id of reference nodes in the deleted list
18170         * </pre>
18171         */
18172        public Builder setDeletedINodeRef(
18173            int index, int value) {
18174          ensureDeletedINodeRefIsMutable();
18175          deletedINodeRef_.set(index, value);
18176          onChanged();
18177          return this;
18178        }
18179        /**
18180         * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
18181         *
18182         * <pre>
18183         * id of reference nodes in the deleted list
18184         * </pre>
18185         */
18186        public Builder addDeletedINodeRef(int value) {
18187          ensureDeletedINodeRefIsMutable();
18188          deletedINodeRef_.add(value);
18189          onChanged();
18190          return this;
18191        }
18192        /**
18193         * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
18194         *
18195         * <pre>
18196         * id of reference nodes in the deleted list
18197         * </pre>
18198         */
18199        public Builder addAllDeletedINodeRef(
18200            java.lang.Iterable<? extends java.lang.Integer> values) {
18201          ensureDeletedINodeRefIsMutable();
18202          super.addAll(values, deletedINodeRef_);
18203          onChanged();
18204          return this;
18205        }
18206        /**
18207         * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
18208         *
18209         * <pre>
18210         * id of reference nodes in the deleted list
18211         * </pre>
18212         */
18213        public Builder clearDeletedINodeRef() {
18214          deletedINodeRef_ = java.util.Collections.emptyList();
18215          bitField0_ = (bitField0_ & ~0x00000080);
18216          onChanged();
18217          return this;
18218        }
18219
18220        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.DirectoryDiff)
18221      }
18222
18223      static {
18224        defaultInstance = new DirectoryDiff(true);
18225        defaultInstance.initFields();
18226      }
18227
18228      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.DirectoryDiff)
18229    }
18230
18231    public interface FileDiffOrBuilder
18232        extends com.google.protobuf.MessageOrBuilder {
18233
18234      // optional uint32 snapshotId = 1;
18235      /**
18236       * <code>optional uint32 snapshotId = 1;</code>
18237       */
18238      boolean hasSnapshotId();
18239      /**
18240       * <code>optional uint32 snapshotId = 1;</code>
18241       */
18242      int getSnapshotId();
18243
18244      // optional uint64 fileSize = 2;
18245      /**
18246       * <code>optional uint64 fileSize = 2;</code>
18247       */
18248      boolean hasFileSize();
18249      /**
18250       * <code>optional uint64 fileSize = 2;</code>
18251       */
18252      long getFileSize();
18253
18254      // optional bytes name = 3;
18255      /**
18256       * <code>optional bytes name = 3;</code>
18257       */
18258      boolean hasName();
18259      /**
18260       * <code>optional bytes name = 3;</code>
18261       */
18262      com.google.protobuf.ByteString getName();
18263
18264      // optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;
18265      /**
18266       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
18267       */
18268      boolean hasSnapshotCopy();
18269      /**
18270       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
18271       */
18272      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getSnapshotCopy();
18273      /**
18274       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
18275       */
18276      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder getSnapshotCopyOrBuilder();
18277
18278      // repeated .hadoop.hdfs.BlockProto blocks = 5;
18279      /**
18280       * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
18281       */
18282      java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> 
18283          getBlocksList();
18284      /**
18285       * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
18286       */
18287      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index);
18288      /**
18289       * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
18290       */
18291      int getBlocksCount();
18292      /**
18293       * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
18294       */
18295      java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
18296          getBlocksOrBuilderList();
18297      /**
18298       * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
18299       */
18300      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
18301          int index);
18302    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.FileDiff}
     */
    public static final class FileDiff extends
        com.google.protobuf.GeneratedMessage
        implements FileDiffOrBuilder {
      // Use FileDiff.newBuilder() to construct.
      private FileDiff(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
        super(builder);
        this.unknownFields = builder.getUnknownFields();
      }
      private FileDiff(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

      private static final FileDiff defaultInstance;
      public static FileDiff getDefaultInstance() {
        return defaultInstance;
      }

      public FileDiff getDefaultInstanceForType() {
        return defaultInstance;
      }

      private final com.google.protobuf.UnknownFieldSet unknownFields;
      @java.lang.Override
      public final com.google.protobuf.UnknownFieldSet
          getUnknownFields() {
        return this.unknownFields;
      }
      private FileDiff(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        initFields();
        int mutable_bitField0_ = 0;
        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
            com.google.protobuf.UnknownFieldSet.newBuilder();
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!parseUnknownField(input, unknownFields,
                                       extensionRegistry, tag)) {
                  done = true;
                }
                break;
              }
              case 8: {
                bitField0_ |= 0x00000001;
                snapshotId_ = input.readUInt32();
                break;
              }
              case 16: {
                bitField0_ |= 0x00000002;
                fileSize_ = input.readUInt64();
                break;
              }
              case 26: {
                bitField0_ |= 0x00000004;
                name_ = input.readBytes();
                break;
              }
              case 34: {
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder subBuilder = null;
                if (((bitField0_ & 0x00000008) == 0x00000008)) {
                  subBuilder = snapshotCopy_.toBuilder();
                }
                snapshotCopy_ = input.readMessage(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.PARSER, extensionRegistry);
                if (subBuilder != null) {
                  subBuilder.mergeFrom(snapshotCopy_);
                  snapshotCopy_ = subBuilder.buildPartial();
                }
                bitField0_ |= 0x00000008;
                break;
              }
              case 42: {
                if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
                  blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto>();
                  mutable_bitField0_ |= 0x00000010;
                }
                blocks_.add(input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.PARSER, extensionRegistry));
                break;
              }
            }
          }
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(this);
        } catch (java.io.IOException e) {
          throw new com.google.protobuf.InvalidProtocolBufferException(
              e.getMessage()).setUnfinishedMessage(this);
        } finally {
          if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
            blocks_ = java.util.Collections.unmodifiableList(blocks_);
          }
          this.unknownFields = unknownFields.build();
          makeExtensionsImmutable();
        }
      }
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff.Builder.class);
      }

      public static com.google.protobuf.Parser<FileDiff> PARSER =
          new com.google.protobuf.AbstractParser<FileDiff>() {
        public FileDiff parsePartialFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws com.google.protobuf.InvalidProtocolBufferException {
          return new FileDiff(input, extensionRegistry);
        }
      };

      @java.lang.Override
      public com.google.protobuf.Parser<FileDiff> getParserForType() {
        return PARSER;
      }

      private int bitField0_;
      // optional uint32 snapshotId = 1;
      public static final int SNAPSHOTID_FIELD_NUMBER = 1;
      private int snapshotId_;
      /**
       * <code>optional uint32 snapshotId = 1;</code>
       */
      public boolean hasSnapshotId() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>optional uint32 snapshotId = 1;</code>
       */
      public int getSnapshotId() {
        return snapshotId_;
      }

      // optional uint64 fileSize = 2;
      public static final int FILESIZE_FIELD_NUMBER = 2;
      private long fileSize_;
      /**
       * <code>optional uint64 fileSize = 2;</code>
       */
      public boolean hasFileSize() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>optional uint64 fileSize = 2;</code>
       */
      public long getFileSize() {
        return fileSize_;
      }

      // optional bytes name = 3;
      public static final int NAME_FIELD_NUMBER = 3;
      private com.google.protobuf.ByteString name_;
      /**
       * <code>optional bytes name = 3;</code>
       */
      public boolean hasName() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      /**
       * <code>optional bytes name = 3;</code>
       */
      public com.google.protobuf.ByteString getName() {
        return name_;
      }

      // optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;
      public static final int SNAPSHOTCOPY_FIELD_NUMBER = 4;
      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile snapshotCopy_;
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
       */
      public boolean hasSnapshotCopy() {
        return ((bitField0_ & 0x00000008) == 0x00000008);
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
       */
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getSnapshotCopy() {
        return snapshotCopy_;
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
       */
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder getSnapshotCopyOrBuilder() {
        return snapshotCopy_;
      }

      // repeated .hadoop.hdfs.BlockProto blocks = 5;
      public static final int BLOCKS_FIELD_NUMBER = 5;
      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> blocks_;
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> getBlocksList() {
        return blocks_;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
          getBlocksOrBuilderList() {
        return blocks_;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
       */
      public int getBlocksCount() {
        return blocks_.size();
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) {
        return blocks_.get(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
          int index) {
        return blocks_.get(index);
      }
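
      // Reading the repeated blocks field uses the generated accessors shown
      // above; a minimal sketch, assuming `diff` is a parsed FileDiff:
      //
      //   for (int i = 0; i < diff.getBlocksCount(); i++) {
      //     org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto b =
      //         diff.getBlocks(i);
      //   }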

      private void initFields() {
        snapshotId_ = 0;
        fileSize_ = 0L;
        name_ = com.google.protobuf.ByteString.EMPTY;
        snapshotCopy_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
        blocks_ = java.util.Collections.emptyList();
      }
      private byte memoizedIsInitialized = -1;
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized != -1) return isInitialized == 1;

        if (hasSnapshotCopy()) {
          if (!getSnapshotCopy().isInitialized()) {
            memoizedIsInitialized = 0;
            return false;
          }
        }
        for (int i = 0; i < getBlocksCount(); i++) {
          if (!getBlocks(i).isInitialized()) {
            memoizedIsInitialized = 0;
            return false;
          }
        }
        memoizedIsInitialized = 1;
        return true;
      }

      public void writeTo(com.google.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        getSerializedSize();
        if (((bitField0_ & 0x00000001) == 0x00000001)) {
          output.writeUInt32(1, snapshotId_);
        }
        if (((bitField0_ & 0x00000002) == 0x00000002)) {
          output.writeUInt64(2, fileSize_);
        }
        if (((bitField0_ & 0x00000004) == 0x00000004)) {
          output.writeBytes(3, name_);
        }
        if (((bitField0_ & 0x00000008) == 0x00000008)) {
          output.writeMessage(4, snapshotCopy_);
        }
        for (int i = 0; i < blocks_.size(); i++) {
          output.writeMessage(5, blocks_.get(i));
        }
        getUnknownFields().writeTo(output);
      }

      private int memoizedSerializedSize = -1;
      public int getSerializedSize() {
        int size = memoizedSerializedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) == 0x00000001)) {
          size += com.google.protobuf.CodedOutputStream
            .computeUInt32Size(1, snapshotId_);
        }
        if (((bitField0_ & 0x00000002) == 0x00000002)) {
          size += com.google.protobuf.CodedOutputStream
            .computeUInt64Size(2, fileSize_);
        }
        if (((bitField0_ & 0x00000004) == 0x00000004)) {
          size += com.google.protobuf.CodedOutputStream
            .computeBytesSize(3, name_);
        }
        if (((bitField0_ & 0x00000008) == 0x00000008)) {
          size += com.google.protobuf.CodedOutputStream
            .computeMessageSize(4, snapshotCopy_);
        }
        for (int i = 0; i < blocks_.size(); i++) {
          size += com.google.protobuf.CodedOutputStream
            .computeMessageSize(5, blocks_.get(i));
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSerializedSize = size;
        return size;
      }

      private static final long serialVersionUID = 0L;
      @java.lang.Override
      protected java.lang.Object writeReplace()
          throws java.io.ObjectStreamException {
        return super.writeReplace();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom(
          com.google.protobuf.ByteString data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom(
          com.google.protobuf.ByteString data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom(byte[] data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom(
          byte[] data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return PARSER.parseFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseFrom(input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return PARSER.parseDelimitedFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseDelimitedFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseDelimitedFrom(input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom(
          com.google.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return PARSER.parseFrom(input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return PARSER.parseFrom(input, extensionRegistry);
      }
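
      // Round-trip sketch using the parse helpers above (byte[] form; the
      // `diff` instance is illustrative):
      //
      //   byte[] bytes = diff.toByteArray();
      //   FsImageProto.SnapshotDiffSection.FileDiff copy =
      //       FsImageProto.SnapshotDiffSection.FileDiff.parseFrom(bytes);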

      public static Builder newBuilder() { return Builder.create(); }
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff prototype) {
        return newBuilder().mergeFrom(prototype);
      }
      public Builder toBuilder() { return newBuilder(this); }

      @java.lang.Override
      protected Builder newBuilderForType(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.FileDiff}
       */
      public static final class Builder extends
          com.google.protobuf.GeneratedMessage.Builder<Builder>
          implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiffOrBuilder {
        public static final com.google.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_descriptor;
        }

        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff.newBuilder()
        private Builder() {
          maybeForceBuilderInitialization();
        }

        private Builder(
            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
          super(parent);
          maybeForceBuilderInitialization();
        }
        private void maybeForceBuilderInitialization() {
          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
            getSnapshotCopyFieldBuilder();
            getBlocksFieldBuilder();
          }
        }
        private static Builder create() {
          return new Builder();
        }

        public Builder clear() {
          super.clear();
          snapshotId_ = 0;
          bitField0_ = (bitField0_ & ~0x00000001);
          fileSize_ = 0L;
          bitField0_ = (bitField0_ & ~0x00000002);
          name_ = com.google.protobuf.ByteString.EMPTY;
          bitField0_ = (bitField0_ & ~0x00000004);
          if (snapshotCopyBuilder_ == null) {
            snapshotCopy_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
          } else {
            snapshotCopyBuilder_.clear();
          }
          bitField0_ = (bitField0_ & ~0x00000008);
          if (blocksBuilder_ == null) {
            blocks_ = java.util.Collections.emptyList();
            bitField0_ = (bitField0_ & ~0x00000010);
          } else {
            blocksBuilder_.clear();
          }
          return this;
        }

        public Builder clone() {
          return create().mergeFrom(buildPartial());
        }

        public com.google.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_descriptor;
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff.getDefaultInstance();
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff(this);
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
            to_bitField0_ |= 0x00000001;
          }
          result.snapshotId_ = snapshotId_;
          if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
            to_bitField0_ |= 0x00000002;
          }
          result.fileSize_ = fileSize_;
          if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
            to_bitField0_ |= 0x00000004;
          }
          result.name_ = name_;
          if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
            to_bitField0_ |= 0x00000008;
          }
          if (snapshotCopyBuilder_ == null) {
            result.snapshotCopy_ = snapshotCopy_;
          } else {
            result.snapshotCopy_ = snapshotCopyBuilder_.build();
          }
          if (blocksBuilder_ == null) {
            if (((bitField0_ & 0x00000010) == 0x00000010)) {
              blocks_ = java.util.Collections.unmodifiableList(blocks_);
              bitField0_ = (bitField0_ & ~0x00000010);
            }
            result.blocks_ = blocks_;
          } else {
            result.blocks_ = blocksBuilder_.build();
          }
          result.bitField0_ = to_bitField0_;
          onBuilt();
          return result;
        }

        public Builder mergeFrom(com.google.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff.getDefaultInstance()) return this;
          if (other.hasSnapshotId()) {
            setSnapshotId(other.getSnapshotId());
          }
          if (other.hasFileSize()) {
            setFileSize(other.getFileSize());
          }
          if (other.hasName()) {
            setName(other.getName());
          }
          if (other.hasSnapshotCopy()) {
            mergeSnapshotCopy(other.getSnapshotCopy());
          }
          if (blocksBuilder_ == null) {
            if (!other.blocks_.isEmpty()) {
              if (blocks_.isEmpty()) {
                blocks_ = other.blocks_;
                bitField0_ = (bitField0_ & ~0x00000010);
              } else {
                ensureBlocksIsMutable();
                blocks_.addAll(other.blocks_);
              }
              onChanged();
            }
          } else {
            if (!other.blocks_.isEmpty()) {
              if (blocksBuilder_.isEmpty()) {
                blocksBuilder_.dispose();
                blocksBuilder_ = null;
                blocks_ = other.blocks_;
                bitField0_ = (bitField0_ & ~0x00000010);
                blocksBuilder_ =
                  com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
                     getBlocksFieldBuilder() : null;
              } else {
                blocksBuilder_.addAllMessages(other.blocks_);
              }
            }
          }
          this.mergeUnknownFields(other.getUnknownFields());
          return this;
        }
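
        // Note on the merge above: scalar fields set in `other` overwrite
        // this builder's values, the optional snapshotCopy message is merged
        // recursively, and the repeated blocks list is appended. A sketch,
        // with `base` and `update` as illustrative messages:
        //
        //   FileDiff merged = FileDiff.newBuilder(base).mergeFrom(update).build();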

        public final boolean isInitialized() {
          if (hasSnapshotCopy()) {
            if (!getSnapshotCopy().isInitialized()) {

              return false;
            }
          }
          for (int i = 0; i < getBlocksCount(); i++) {
            if (!getBlocks(i).isInitialized()) {

              return false;
            }
          }
          return true;
        }

        public Builder mergeFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parsedMessage = null;
          try {
            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff) e.getUnfinishedMessage();
            throw e;
          } finally {
            if (parsedMessage != null) {
              mergeFrom(parsedMessage);
            }
          }
          return this;
        }
        private int bitField0_;

        // optional uint32 snapshotId = 1;
        private int snapshotId_;
        /**
         * <code>optional uint32 snapshotId = 1;</code>
         */
        public boolean hasSnapshotId() {
          return ((bitField0_ & 0x00000001) == 0x00000001);
        }
        /**
         * <code>optional uint32 snapshotId = 1;</code>
         */
        public int getSnapshotId() {
          return snapshotId_;
        }
        /**
         * <code>optional uint32 snapshotId = 1;</code>
         */
        public Builder setSnapshotId(int value) {
          bitField0_ |= 0x00000001;
          snapshotId_ = value;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint32 snapshotId = 1;</code>
         */
        public Builder clearSnapshotId() {
          bitField0_ = (bitField0_ & ~0x00000001);
          snapshotId_ = 0;
          onChanged();
          return this;
        }

        // optional uint64 fileSize = 2;
        private long fileSize_;
        /**
         * <code>optional uint64 fileSize = 2;</code>
         */
        public boolean hasFileSize() {
          return ((bitField0_ & 0x00000002) == 0x00000002);
        }
        /**
         * <code>optional uint64 fileSize = 2;</code>
         */
        public long getFileSize() {
          return fileSize_;
        }
        /**
         * <code>optional uint64 fileSize = 2;</code>
         */
        public Builder setFileSize(long value) {
          bitField0_ |= 0x00000002;
          fileSize_ = value;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 fileSize = 2;</code>
         */
        public Builder clearFileSize() {
          bitField0_ = (bitField0_ & ~0x00000002);
          fileSize_ = 0L;
          onChanged();
          return this;
        }

        // optional bytes name = 3;
        private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY;
        /**
         * <code>optional bytes name = 3;</code>
         */
        public boolean hasName() {
          return ((bitField0_ & 0x00000004) == 0x00000004);
        }
        /**
         * <code>optional bytes name = 3;</code>
         */
        public com.google.protobuf.ByteString getName() {
          return name_;
        }
        /**
         * <code>optional bytes name = 3;</code>
         */
        public Builder setName(com.google.protobuf.ByteString value) {
          if (value == null) {
            throw new NullPointerException();
          }
          bitField0_ |= 0x00000004;
          name_ = value;
          onChanged();
          return this;
        }
        /**
         * <code>optional bytes name = 3;</code>
         */
        public Builder clearName() {
          bitField0_ = (bitField0_ & ~0x00000004);
          name_ = getDefaultInstance().getName();
          onChanged();
          return this;
        }

        // optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;
        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile snapshotCopy_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
        private com.google.protobuf.SingleFieldBuilder<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder> snapshotCopyBuilder_;
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
         */
        public boolean hasSnapshotCopy() {
          return ((bitField0_ & 0x00000008) == 0x00000008);
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getSnapshotCopy() {
          if (snapshotCopyBuilder_ == null) {
            return snapshotCopy_;
          } else {
            return snapshotCopyBuilder_.getMessage();
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
         */
        public Builder setSnapshotCopy(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile value) {
          if (snapshotCopyBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            snapshotCopy_ = value;
            onChanged();
          } else {
            snapshotCopyBuilder_.setMessage(value);
          }
          bitField0_ |= 0x00000008;
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
         */
        public Builder setSnapshotCopy(
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder builderForValue) {
          if (snapshotCopyBuilder_ == null) {
            snapshotCopy_ = builderForValue.build();
            onChanged();
          } else {
            snapshotCopyBuilder_.setMessage(builderForValue.build());
          }
          bitField0_ |= 0x00000008;
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
         */
        public Builder mergeSnapshotCopy(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile value) {
          if (snapshotCopyBuilder_ == null) {
            if (((bitField0_ & 0x00000008) == 0x00000008) &&
                snapshotCopy_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance()) {
              snapshotCopy_ =
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.newBuilder(snapshotCopy_).mergeFrom(value).buildPartial();
            } else {
              snapshotCopy_ = value;
            }
            onChanged();
          } else {
            snapshotCopyBuilder_.mergeFrom(value);
          }
          bitField0_ |= 0x00000008;
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
         */
        public Builder clearSnapshotCopy() {
          if (snapshotCopyBuilder_ == null) {
            snapshotCopy_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
            onChanged();
          } else {
            snapshotCopyBuilder_.clear();
          }
          bitField0_ = (bitField0_ & ~0x00000008);
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder getSnapshotCopyBuilder() {
          bitField0_ |= 0x00000008;
          onChanged();
          return getSnapshotCopyFieldBuilder().getBuilder();
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder getSnapshotCopyOrBuilder() {
          if (snapshotCopyBuilder_ != null) {
            return snapshotCopyBuilder_.getMessageOrBuilder();
          } else {
            return snapshotCopy_;
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
         */
        private com.google.protobuf.SingleFieldBuilder<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder>
            getSnapshotCopyFieldBuilder() {
          if (snapshotCopyBuilder_ == null) {
            snapshotCopyBuilder_ = new com.google.protobuf.SingleFieldBuilder<
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder>(
                    snapshotCopy_,
                    getParentForChildren(),
                    isClean());
            snapshotCopy_ = null;
          }
          return snapshotCopyBuilder_;
        }

        // repeated .hadoop.hdfs.BlockProto blocks = 5;
        private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> blocks_ =
          java.util.Collections.emptyList();
        private void ensureBlocksIsMutable() {
          if (!((bitField0_ & 0x00000010) == 0x00000010)) {
            blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto>(blocks_);
            bitField0_ |= 0x00000010;
          }
        }

        private com.google.protobuf.RepeatedFieldBuilder<
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blocksBuilder_;

        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> getBlocksList() {
          if (blocksBuilder_ == null) {
            return java.util.Collections.unmodifiableList(blocks_);
          } else {
            return blocksBuilder_.getMessageList();
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public int getBlocksCount() {
          if (blocksBuilder_ == null) {
            return blocks_.size();
          } else {
            return blocksBuilder_.getCount();
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) {
          if (blocksBuilder_ == null) {
            return blocks_.get(index);
          } else {
            return blocksBuilder_.getMessage(index);
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public Builder setBlocks(
            int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
          if (blocksBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            ensureBlocksIsMutable();
            blocks_.set(index, value);
            onChanged();
          } else {
            blocksBuilder_.setMessage(index, value);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public Builder setBlocks(
            int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
          if (blocksBuilder_ == null) {
            ensureBlocksIsMutable();
            blocks_.set(index, builderForValue.build());
            onChanged();
          } else {
            blocksBuilder_.setMessage(index, builderForValue.build());
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
          if (blocksBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            ensureBlocksIsMutable();
            blocks_.add(value);
            onChanged();
          } else {
            blocksBuilder_.addMessage(value);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public Builder addBlocks(
            int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
          if (blocksBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            ensureBlocksIsMutable();
            blocks_.add(index, value);
            onChanged();
          } else {
            blocksBuilder_.addMessage(index, value);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public Builder addBlocks(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
          if (blocksBuilder_ == null) {
            ensureBlocksIsMutable();
            blocks_.add(builderForValue.build());
            onChanged();
          } else {
            blocksBuilder_.addMessage(builderForValue.build());
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public Builder addBlocks(
            int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
          if (blocksBuilder_ == null) {
            ensureBlocksIsMutable();
            blocks_.add(index, builderForValue.build());
            onChanged();
          } else {
            blocksBuilder_.addMessage(index, builderForValue.build());
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public Builder addAllBlocks(
            java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> values) {
          if (blocksBuilder_ == null) {
            ensureBlocksIsMutable();
            super.addAll(values, blocks_);
            onChanged();
          } else {
            blocksBuilder_.addAllMessages(values);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public Builder clearBlocks() {
          if (blocksBuilder_ == null) {
            blocks_ = java.util.Collections.emptyList();
            bitField0_ = (bitField0_ & ~0x00000010);
            onChanged();
          } else {
            blocksBuilder_.clear();
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public Builder removeBlocks(int index) {
          if (blocksBuilder_ == null) {
            ensureBlocksIsMutable();
            blocks_.remove(index);
            onChanged();
          } else {
            blocksBuilder_.remove(index);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlocksBuilder(
            int index) {
          return getBlocksFieldBuilder().getBuilder(index);
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
            int index) {
          if (blocksBuilder_ == null) {
            return blocks_.get(index);
          } else {
            return blocksBuilder_.getMessageOrBuilder(index);
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
             getBlocksOrBuilderList() {
          if (blocksBuilder_ != null) {
            return blocksBuilder_.getMessageOrBuilderList();
          } else {
            return java.util.Collections.unmodifiableList(blocks_);
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder() {
          return getBlocksFieldBuilder().addBuilder(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance());
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder(
            int index) {
          return getBlocksFieldBuilder().addBuilder(
              index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance());
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder>
             getBlocksBuilderList() {
          return getBlocksFieldBuilder().getBuilderList();
        }
        private com.google.protobuf.RepeatedFieldBuilder<
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>
            getBlocksFieldBuilder() {
          if (blocksBuilder_ == null) {
            blocksBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>(
                    blocks_,
                    ((bitField0_ & 0x00000010) == 0x00000010),
                    getParentForChildren(),
                    isClean());
            blocks_ = null;
          }
          return blocksBuilder_;
        }

        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.FileDiff)
      }

      static {
        defaultInstance = new FileDiff(true);
        defaultInstance.initFields();
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.FileDiff)
    }

    public interface DiffEntryOrBuilder
        extends com.google.protobuf.MessageOrBuilder {

      // required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1;
      /**
       * <code>required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1;</code>
       */
      boolean hasType();
      /**
       * <code>required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type getType();

      // optional uint64 inodeId = 2;
      /**
       * <code>optional uint64 inodeId = 2;</code>
       */
      boolean hasInodeId();
      /**
       * <code>optional uint64 inodeId = 2;</code>
       */
      long getInodeId();

      // optional uint32 numOfDiff = 3;
      /**
       * <code>optional uint32 numOfDiff = 3;</code>
       */
      boolean hasNumOfDiff();
      /**
       * <code>optional uint32 numOfDiff = 3;</code>
       */
      int getNumOfDiff();
    }
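
    // Construction sketch for DiffEntry (the builder setters are assumed
    // from the standard generated pattern; the values are illustrative):
    //
    //   FsImageProto.SnapshotDiffSection.DiffEntry entry =
    //       FsImageProto.SnapshotDiffSection.DiffEntry.newBuilder()
    //           .setType(FsImageProto.SnapshotDiffSection.DiffEntry.Type.FILEDIFF)
    //           .setInodeId(16385L)
    //           .setNumOfDiff(1)
    //           .build();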
19403    /**
19404     * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry}
19405     */
19406    public static final class DiffEntry extends
19407        com.google.protobuf.GeneratedMessage
19408        implements DiffEntryOrBuilder {
19409      // Use DiffEntry.newBuilder() to construct.
19410      private DiffEntry(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
19411        super(builder);
19412        this.unknownFields = builder.getUnknownFields();
19413      }
19414      private DiffEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
19415
19416      private static final DiffEntry defaultInstance;
19417      public static DiffEntry getDefaultInstance() {
19418        return defaultInstance;
19419      }
19420
19421      public DiffEntry getDefaultInstanceForType() {
19422        return defaultInstance;
19423      }
19424
19425      private final com.google.protobuf.UnknownFieldSet unknownFields;
19426      @java.lang.Override
19427      public final com.google.protobuf.UnknownFieldSet
19428          getUnknownFields() {
19429        return this.unknownFields;
19430      }
19431      private DiffEntry(
19432          com.google.protobuf.CodedInputStream input,
19433          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
19434          throws com.google.protobuf.InvalidProtocolBufferException {
19435        initFields();
19436        int mutable_bitField0_ = 0;
19437        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
19438            com.google.protobuf.UnknownFieldSet.newBuilder();
19439        try {
19440          boolean done = false;
19441          while (!done) {
19442            int tag = input.readTag();
19443            switch (tag) {
19444              case 0:
19445                done = true;
19446                break;
19447              default: {
19448                if (!parseUnknownField(input, unknownFields,
19449                                       extensionRegistry, tag)) {
19450                  done = true;
19451                }
19452                break;
19453              }
19454              case 8: {
19455                int rawValue = input.readEnum();
19456                org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type value = org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type.valueOf(rawValue);
19457                if (value == null) {
19458                  unknownFields.mergeVarintField(1, rawValue);
19459                } else {
19460                  bitField0_ |= 0x00000001;
19461                  type_ = value;
19462                }
19463                break;
19464              }
19465              case 16: {
19466                bitField0_ |= 0x00000002;
19467                inodeId_ = input.readUInt64();
19468                break;
19469              }
19470              case 24: {
19471                bitField0_ |= 0x00000004;
19472                numOfDiff_ = input.readUInt32();
19473                break;
19474              }
19475            }
19476          }
19477        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
19478          throw e.setUnfinishedMessage(this);
19479        } catch (java.io.IOException e) {
19480          throw new com.google.protobuf.InvalidProtocolBufferException(
19481              e.getMessage()).setUnfinishedMessage(this);
19482        } finally {
19483          this.unknownFields = unknownFields.build();
19484          makeExtensionsImmutable();
19485        }
19486      }
19487      public static final com.google.protobuf.Descriptors.Descriptor
19488          getDescriptor() {
19489        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_descriptor;
19490      }
19491
19492      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
19493          internalGetFieldAccessorTable() {
19494        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_fieldAccessorTable
19495            .ensureFieldAccessorsInitialized(
19496                org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Builder.class);
19497      }
19498
19499      public static com.google.protobuf.Parser<DiffEntry> PARSER =
19500          new com.google.protobuf.AbstractParser<DiffEntry>() {
19501        public DiffEntry parsePartialFrom(
19502            com.google.protobuf.CodedInputStream input,
19503            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
19504            throws com.google.protobuf.InvalidProtocolBufferException {
19505          return new DiffEntry(input, extensionRegistry);
19506        }
19507      };
19508
19509      @java.lang.Override
19510      public com.google.protobuf.Parser<DiffEntry> getParserForType() {
19511        return PARSER;
19512      }
19513
19514      /**
19515       * Protobuf enum {@code hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type}
19516       */
19517      public enum Type
19518          implements com.google.protobuf.ProtocolMessageEnum {
19519        /**
19520         * <code>FILEDIFF = 1;</code>
19521         */
19522        FILEDIFF(0, 1),
19523        /**
19524         * <code>DIRECTORYDIFF = 2;</code>
19525         */
19526        DIRECTORYDIFF(1, 2),
19527        ;
19528
19529        /**
19530         * <code>FILEDIFF = 1;</code>
19531         */
19532        public static final int FILEDIFF_VALUE = 1;
19533        /**
19534         * <code>DIRECTORYDIFF = 2;</code>
19535         */
19536        public static final int DIRECTORYDIFF_VALUE = 2;
19537
19538
19539        public final int getNumber() { return value; }
19540
19541        public static Type valueOf(int value) {
19542          switch (value) {
19543            case 1: return FILEDIFF;
19544            case 2: return DIRECTORYDIFF;
19545            default: return null;
19546          }
19547        }
19548
19549        public static com.google.protobuf.Internal.EnumLiteMap<Type>
19550            internalGetValueMap() {
19551          return internalValueMap;
19552        }
19553        private static com.google.protobuf.Internal.EnumLiteMap<Type>
19554            internalValueMap =
19555              new com.google.protobuf.Internal.EnumLiteMap<Type>() {
19556                public Type findValueByNumber(int number) {
19557                  return Type.valueOf(number);
19558                }
19559              };
19560
19561        public final com.google.protobuf.Descriptors.EnumValueDescriptor
19562            getValueDescriptor() {
19563          return getDescriptor().getValues().get(index);
19564        }
19565        public final com.google.protobuf.Descriptors.EnumDescriptor
19566            getDescriptorForType() {
19567          return getDescriptor();
19568        }
19569        public static final com.google.protobuf.Descriptors.EnumDescriptor
19570            getDescriptor() {
19571          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.getDescriptor().getEnumTypes().get(0);
19572        }
19573
19574        private static final Type[] VALUES = values();
19575
19576        public static Type valueOf(
19577            com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
19578          if (desc.getType() != getDescriptor()) {
19579            throw new java.lang.IllegalArgumentException(
19580              "EnumValueDescriptor is not for this type.");
19581          }
19582          return VALUES[desc.getIndex()];
19583        }
19584
19585        private final int index;
19586        private final int value;
19587
19588        private Type(int index, int value) {
19589          this.index = index;
19590          this.value = value;
19591        }
19592
19593        // @@protoc_insertion_point(enum_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type)
19594      }
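
      // Example usage (illustrative): mapping between Type constants and
      // their wire numbers via the methods generated above. Type.valueOf(int)
      // returns null for unrecognized numbers, so callers should null-check
      // before use.
      //
      //   DiffEntry.Type t = DiffEntry.Type.valueOf(2);        // DIRECTORYDIFF
      //   int wire = DiffEntry.Type.FILEDIFF.getNumber();      // 1
      //   DiffEntry.Type unknown = DiffEntry.Type.valueOf(99); // null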
19595
19596      private int bitField0_;
19597      // required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1;
19598      public static final int TYPE_FIELD_NUMBER = 1;
19599      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type type_;
19600      /**
19601       * <code>required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1;</code>
19602       */
19603      public boolean hasType() {
19604        return ((bitField0_ & 0x00000001) == 0x00000001);
19605      }
19606      /**
19607       * <code>required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1;</code>
19608       */
19609      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type getType() {
19610        return type_;
19611      }
19612
19613      // optional uint64 inodeId = 2;
19614      public static final int INODEID_FIELD_NUMBER = 2;
19615      private long inodeId_;
19616      /**
19617       * <code>optional uint64 inodeId = 2;</code>
19618       */
19619      public boolean hasInodeId() {
19620        return ((bitField0_ & 0x00000002) == 0x00000002);
19621      }
19622      /**
19623       * <code>optional uint64 inodeId = 2;</code>
19624       */
19625      public long getInodeId() {
19626        return inodeId_;
19627      }
19628
19629      // optional uint32 numOfDiff = 3;
19630      public static final int NUMOFDIFF_FIELD_NUMBER = 3;
19631      private int numOfDiff_;
19632      /**
19633       * <code>optional uint32 numOfDiff = 3;</code>
19634       */
19635      public boolean hasNumOfDiff() {
19636        return ((bitField0_ & 0x00000004) == 0x00000004);
19637      }
19638      /**
19639       * <code>optional uint32 numOfDiff = 3;</code>
19640       */
19641      public int getNumOfDiff() {
19642        return numOfDiff_;
19643      }
19644
19645      private void initFields() {
19646        type_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type.FILEDIFF;
19647        inodeId_ = 0L;
19648        numOfDiff_ = 0;
19649      }
19650      private byte memoizedIsInitialized = -1;
19651      public final boolean isInitialized() {
19652        byte isInitialized = memoizedIsInitialized;
19653        if (isInitialized != -1) return isInitialized == 1;
19654
19655        if (!hasType()) {
19656          memoizedIsInitialized = 0;
19657          return false;
19658        }
19659        memoizedIsInitialized = 1;
19660        return true;
19661      }
19662
19663      public void writeTo(com.google.protobuf.CodedOutputStream output)
19664                          throws java.io.IOException {
19665        getSerializedSize();
19666        if (((bitField0_ & 0x00000001) == 0x00000001)) {
19667          output.writeEnum(1, type_.getNumber());
19668        }
19669        if (((bitField0_ & 0x00000002) == 0x00000002)) {
19670          output.writeUInt64(2, inodeId_);
19671        }
19672        if (((bitField0_ & 0x00000004) == 0x00000004)) {
19673          output.writeUInt32(3, numOfDiff_);
19674        }
19675        getUnknownFields().writeTo(output);
19676      }
19677
19678      private int memoizedSerializedSize = -1;
19679      public int getSerializedSize() {
19680        int size = memoizedSerializedSize;
19681        if (size != -1) return size;
19682
19683        size = 0;
19684        if (((bitField0_ & 0x00000001) == 0x00000001)) {
19685          size += com.google.protobuf.CodedOutputStream
19686            .computeEnumSize(1, type_.getNumber());
19687        }
19688        if (((bitField0_ & 0x00000002) == 0x00000002)) {
19689          size += com.google.protobuf.CodedOutputStream
19690            .computeUInt64Size(2, inodeId_);
19691        }
19692        if (((bitField0_ & 0x00000004) == 0x00000004)) {
19693          size += com.google.protobuf.CodedOutputStream
19694            .computeUInt32Size(3, numOfDiff_);
19695        }
19696        size += getUnknownFields().getSerializedSize();
19697        memoizedSerializedSize = size;
19698        return size;
19699      }
19700
19701      private static final long serialVersionUID = 0L;
19702      @java.lang.Override
19703      protected java.lang.Object writeReplace()
19704          throws java.io.ObjectStreamException {
19705        return super.writeReplace();
19706      }
19707
19708      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom(
19709          com.google.protobuf.ByteString data)
19710          throws com.google.protobuf.InvalidProtocolBufferException {
19711        return PARSER.parseFrom(data);
19712      }
19713      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom(
19714          com.google.protobuf.ByteString data,
19715          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
19716          throws com.google.protobuf.InvalidProtocolBufferException {
19717        return PARSER.parseFrom(data, extensionRegistry);
19718      }
19719      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom(byte[] data)
19720          throws com.google.protobuf.InvalidProtocolBufferException {
19721        return PARSER.parseFrom(data);
19722      }
19723      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom(
19724          byte[] data,
19725          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
19726          throws com.google.protobuf.InvalidProtocolBufferException {
19727        return PARSER.parseFrom(data, extensionRegistry);
19728      }
19729      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom(java.io.InputStream input)
19730          throws java.io.IOException {
19731        return PARSER.parseFrom(input);
19732      }
19733      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom(
19734          java.io.InputStream input,
19735          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
19736          throws java.io.IOException {
19737        return PARSER.parseFrom(input, extensionRegistry);
19738      }
19739      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseDelimitedFrom(java.io.InputStream input)
19740          throws java.io.IOException {
19741        return PARSER.parseDelimitedFrom(input);
19742      }
19743      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseDelimitedFrom(
19744          java.io.InputStream input,
19745          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
19746          throws java.io.IOException {
19747        return PARSER.parseDelimitedFrom(input, extensionRegistry);
19748      }
19749      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom(
19750          com.google.protobuf.CodedInputStream input)
19751          throws java.io.IOException {
19752        return PARSER.parseFrom(input);
19753      }
19754      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom(
19755          com.google.protobuf.CodedInputStream input,
19756          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
19757          throws java.io.IOException {
19758        return PARSER.parseFrom(input, extensionRegistry);
19759      }
19760
19761      public static Builder newBuilder() { return Builder.create(); }
19762      public Builder newBuilderForType() { return newBuilder(); }
19763      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry prototype) {
19764        return newBuilder().mergeFrom(prototype);
19765      }
19766      public Builder toBuilder() { return newBuilder(this); }
19767
19768      @java.lang.Override
19769      protected Builder newBuilderForType(
19770          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
19771        Builder builder = new Builder(parent);
19772        return builder;
19773      }
19774      /**
19775       * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry}
19776       */
19777      public static final class Builder extends
19778          com.google.protobuf.GeneratedMessage.Builder<Builder>
19779         implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntryOrBuilder {
19780        public static final com.google.protobuf.Descriptors.Descriptor
19781            getDescriptor() {
19782          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_descriptor;
19783        }
19784
19785        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
19786            internalGetFieldAccessorTable() {
19787          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_fieldAccessorTable
19788              .ensureFieldAccessorsInitialized(
19789                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Builder.class);
19790        }
19791
19792        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.newBuilder()
19793        private Builder() {
19794          maybeForceBuilderInitialization();
19795        }
19796
19797        private Builder(
19798            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
19799          super(parent);
19800          maybeForceBuilderInitialization();
19801        }
19802        private void maybeForceBuilderInitialization() {
19803          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
19804          }
19805        }
19806        private static Builder create() {
19807          return new Builder();
19808        }
19809
19810        public Builder clear() {
19811          super.clear();
19812          type_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type.FILEDIFF;
19813          bitField0_ = (bitField0_ & ~0x00000001);
19814          inodeId_ = 0L;
19815          bitField0_ = (bitField0_ & ~0x00000002);
19816          numOfDiff_ = 0;
19817          bitField0_ = (bitField0_ & ~0x00000004);
19818          return this;
19819        }
19820
19821        public Builder clone() {
19822          return create().mergeFrom(buildPartial());
19823        }
19824
19825        public com.google.protobuf.Descriptors.Descriptor
19826            getDescriptorForType() {
19827          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_descriptor;
19828        }
19829
19830        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry getDefaultInstanceForType() {
19831          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.getDefaultInstance();
19832        }
19833
19834        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry build() {
19835          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry result = buildPartial();
19836          if (!result.isInitialized()) {
19837            throw newUninitializedMessageException(result);
19838          }
19839          return result;
19840        }
19841
19842        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry buildPartial() {
19843          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry(this);
19844          int from_bitField0_ = bitField0_;
19845          int to_bitField0_ = 0;
19846          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
19847            to_bitField0_ |= 0x00000001;
19848          }
19849          result.type_ = type_;
19850          if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
19851            to_bitField0_ |= 0x00000002;
19852          }
19853          result.inodeId_ = inodeId_;
19854          if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
19855            to_bitField0_ |= 0x00000004;
19856          }
19857          result.numOfDiff_ = numOfDiff_;
19858          result.bitField0_ = to_bitField0_;
19859          onBuilt();
19860          return result;
19861        }
19862
19863        public Builder mergeFrom(com.google.protobuf.Message other) {
19864          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry) {
19865            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry)other);
19866          } else {
19867            super.mergeFrom(other);
19868            return this;
19869          }
19870        }
19871
19872        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry other) {
19873          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.getDefaultInstance()) return this;
19874          if (other.hasType()) {
19875            setType(other.getType());
19876          }
19877          if (other.hasInodeId()) {
19878            setInodeId(other.getInodeId());
19879          }
19880          if (other.hasNumOfDiff()) {
19881            setNumOfDiff(other.getNumOfDiff());
19882          }
19883          this.mergeUnknownFields(other.getUnknownFields());
19884          return this;
19885        }
19886
19887        public final boolean isInitialized() {
          if (!hasType()) {
            return false;
          }
19892          return true;
19893        }
19894
19895        public Builder mergeFrom(
19896            com.google.protobuf.CodedInputStream input,
19897            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
19898            throws java.io.IOException {
19899          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parsedMessage = null;
19900          try {
19901            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
19902          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
19903            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry) e.getUnfinishedMessage();
19904            throw e;
19905          } finally {
19906            if (parsedMessage != null) {
19907              mergeFrom(parsedMessage);
19908            }
19909          }
19910          return this;
19911        }
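        // Note: the finally block above intentionally merges whatever was
        // parsed before an InvalidProtocolBufferException was thrown, so a
        // failed parse still leaves the builder holding the partially read
        // fields (they are also attached to the exception itself via
        // getUnfinishedMessage()).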
19912        private int bitField0_;
19913
19914        // required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1;
19915        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type type_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type.FILEDIFF;
19916        /**
19917         * <code>required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1;</code>
19918         */
19919        public boolean hasType() {
19920          return ((bitField0_ & 0x00000001) == 0x00000001);
19921        }
19922        /**
19923         * <code>required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1;</code>
19924         */
19925        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type getType() {
19926          return type_;
19927        }
19928        /**
19929         * <code>required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1;</code>
19930         */
19931        public Builder setType(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type value) {
19932          if (value == null) {
19933            throw new NullPointerException();
19934          }
19935          bitField0_ |= 0x00000001;
19936          type_ = value;
19937          onChanged();
19938          return this;
19939        }
19940        /**
19941         * <code>required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1;</code>
19942         */
19943        public Builder clearType() {
19944          bitField0_ = (bitField0_ & ~0x00000001);
19945          type_ = org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type.FILEDIFF;
19946          onChanged();
19947          return this;
19948        }
19949
19950        // optional uint64 inodeId = 2;
19951        private long inodeId_ ;
19952        /**
19953         * <code>optional uint64 inodeId = 2;</code>
19954         */
19955        public boolean hasInodeId() {
19956          return ((bitField0_ & 0x00000002) == 0x00000002);
19957        }
19958        /**
19959         * <code>optional uint64 inodeId = 2;</code>
19960         */
19961        public long getInodeId() {
19962          return inodeId_;
19963        }
19964        /**
19965         * <code>optional uint64 inodeId = 2;</code>
19966         */
19967        public Builder setInodeId(long value) {
19968          bitField0_ |= 0x00000002;
19969          inodeId_ = value;
19970          onChanged();
19971          return this;
19972        }
19973        /**
19974         * <code>optional uint64 inodeId = 2;</code>
19975         */
19976        public Builder clearInodeId() {
19977          bitField0_ = (bitField0_ & ~0x00000002);
19978          inodeId_ = 0L;
19979          onChanged();
19980          return this;
19981        }
19982
19983        // optional uint32 numOfDiff = 3;
19984        private int numOfDiff_ ;
19985        /**
19986         * <code>optional uint32 numOfDiff = 3;</code>
19987         */
19988        public boolean hasNumOfDiff() {
19989          return ((bitField0_ & 0x00000004) == 0x00000004);
19990        }
19991        /**
19992         * <code>optional uint32 numOfDiff = 3;</code>
19993         */
19994        public int getNumOfDiff() {
19995          return numOfDiff_;
19996        }
19997        /**
19998         * <code>optional uint32 numOfDiff = 3;</code>
19999         */
20000        public Builder setNumOfDiff(int value) {
20001          bitField0_ |= 0x00000004;
20002          numOfDiff_ = value;
20003          onChanged();
20004          return this;
20005        }
20006        /**
20007         * <code>optional uint32 numOfDiff = 3;</code>
20008         */
20009        public Builder clearNumOfDiff() {
20010          bitField0_ = (bitField0_ & ~0x00000004);
20011          numOfDiff_ = 0;
20012          onChanged();
20013          return this;
20014        }
20015
20016        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry)
20017      }
20018
20019      static {
20020        defaultInstance = new DiffEntry(true);
20021        defaultInstance.initFields();
20022      }
20023
20024      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry)
20025    }
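
    // Example usage (illustrative sketch, with hypothetical field values):
    // building a DiffEntry with the generated Builder and round-tripping it
    // through its wire form. The required 'type' field must be set or build()
    // throws an UninitializedMessageException; inodeId and numOfDiff are
    // optional.
    //
    //   SnapshotDiffSection.DiffEntry entry =
    //       SnapshotDiffSection.DiffEntry.newBuilder()
    //           .setType(SnapshotDiffSection.DiffEntry.Type.DIRECTORYDIFF)
    //           .setInodeId(16386L) // hypothetical inode id
    //           .setNumOfDiff(3)
    //           .build();
    //   com.google.protobuf.ByteString bytes = entry.toByteString();
    //   SnapshotDiffSection.DiffEntry copy =
    //       SnapshotDiffSection.DiffEntry.parseFrom(bytes);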
20026
20027    private void initFields() {
20028    }
20029    private byte memoizedIsInitialized = -1;
20030    public final boolean isInitialized() {
20031      byte isInitialized = memoizedIsInitialized;
20032      if (isInitialized != -1) return isInitialized == 1;
20033
20034      memoizedIsInitialized = 1;
20035      return true;
20036    }
20037
20038    public void writeTo(com.google.protobuf.CodedOutputStream output)
20039                        throws java.io.IOException {
20040      getSerializedSize();
20041      getUnknownFields().writeTo(output);
20042    }
20043
20044    private int memoizedSerializedSize = -1;
20045    public int getSerializedSize() {
20046      int size = memoizedSerializedSize;
20047      if (size != -1) return size;
20048
20049      size = 0;
20050      size += getUnknownFields().getSerializedSize();
20051      memoizedSerializedSize = size;
20052      return size;
20053    }
20054
20055    private static final long serialVersionUID = 0L;
20056    @java.lang.Override
20057    protected java.lang.Object writeReplace()
20058        throws java.io.ObjectStreamException {
20059      return super.writeReplace();
20060    }
20061
20062    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom(
20063        com.google.protobuf.ByteString data)
20064        throws com.google.protobuf.InvalidProtocolBufferException {
20065      return PARSER.parseFrom(data);
20066    }
20067    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom(
20068        com.google.protobuf.ByteString data,
20069        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20070        throws com.google.protobuf.InvalidProtocolBufferException {
20071      return PARSER.parseFrom(data, extensionRegistry);
20072    }
20073    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom(byte[] data)
20074        throws com.google.protobuf.InvalidProtocolBufferException {
20075      return PARSER.parseFrom(data);
20076    }
20077    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom(
20078        byte[] data,
20079        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20080        throws com.google.protobuf.InvalidProtocolBufferException {
20081      return PARSER.parseFrom(data, extensionRegistry);
20082    }
20083    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom(java.io.InputStream input)
20084        throws java.io.IOException {
20085      return PARSER.parseFrom(input);
20086    }
20087    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom(
20088        java.io.InputStream input,
20089        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20090        throws java.io.IOException {
20091      return PARSER.parseFrom(input, extensionRegistry);
20092    }
20093    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseDelimitedFrom(java.io.InputStream input)
20094        throws java.io.IOException {
20095      return PARSER.parseDelimitedFrom(input);
20096    }
20097    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseDelimitedFrom(
20098        java.io.InputStream input,
20099        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20100        throws java.io.IOException {
20101      return PARSER.parseDelimitedFrom(input, extensionRegistry);
20102    }
20103    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom(
20104        com.google.protobuf.CodedInputStream input)
20105        throws java.io.IOException {
20106      return PARSER.parseFrom(input);
20107    }
20108    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom(
20109        com.google.protobuf.CodedInputStream input,
20110        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20111        throws java.io.IOException {
20112      return PARSER.parseFrom(input, extensionRegistry);
20113    }
20114
20115    public static Builder newBuilder() { return Builder.create(); }
20116    public Builder newBuilderForType() { return newBuilder(); }
20117    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection prototype) {
20118      return newBuilder().mergeFrom(prototype);
20119    }
20120    public Builder toBuilder() { return newBuilder(this); }
20121
20122    @java.lang.Override
20123    protected Builder newBuilderForType(
20124        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
20125      Builder builder = new Builder(parent);
20126      return builder;
20127    }
20128    /**
20129     * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection}
20130     *
20131     * <pre>
     * This section records information about snapshot diffs.
20134     * NAME: SNAPSHOT_DIFF
20135     * </pre>
20136     */
20137    public static final class Builder extends
20138        com.google.protobuf.GeneratedMessage.Builder<Builder>
20139       implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSectionOrBuilder {
20140      public static final com.google.protobuf.Descriptors.Descriptor
20141          getDescriptor() {
20142        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor;
20143      }
20144
20145      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
20146          internalGetFieldAccessorTable() {
20147        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_fieldAccessorTable
20148            .ensureFieldAccessorsInitialized(
20149                org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.Builder.class);
20150      }
20151
20152      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.newBuilder()
20153      private Builder() {
20154        maybeForceBuilderInitialization();
20155      }
20156
20157      private Builder(
20158          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
20159        super(parent);
20160        maybeForceBuilderInitialization();
20161      }
20162      private void maybeForceBuilderInitialization() {
20163        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
20164        }
20165      }
20166      private static Builder create() {
20167        return new Builder();
20168      }
20169
20170      public Builder clear() {
20171        super.clear();
20172        return this;
20173      }
20174
20175      public Builder clone() {
20176        return create().mergeFrom(buildPartial());
20177      }
20178
20179      public com.google.protobuf.Descriptors.Descriptor
20180          getDescriptorForType() {
20181        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor;
20182      }
20183
20184      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection getDefaultInstanceForType() {
20185        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.getDefaultInstance();
20186      }
20187
20188      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection build() {
20189        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection result = buildPartial();
20190        if (!result.isInitialized()) {
20191          throw newUninitializedMessageException(result);
20192        }
20193        return result;
20194      }
20195
20196      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection buildPartial() {
20197        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection(this);
20198        onBuilt();
20199        return result;
20200      }
20201
20202      public Builder mergeFrom(com.google.protobuf.Message other) {
20203        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection) {
20204          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection)other);
20205        } else {
20206          super.mergeFrom(other);
20207          return this;
20208        }
20209      }
20210
20211      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection other) {
20212        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.getDefaultInstance()) return this;
20213        this.mergeUnknownFields(other.getUnknownFields());
20214        return this;
20215      }
20216
20217      public final boolean isInitialized() {
20218        return true;
20219      }
20220
20221      public Builder mergeFrom(
20222          com.google.protobuf.CodedInputStream input,
20223          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20224          throws java.io.IOException {
20225        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parsedMessage = null;
20226        try {
20227          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
20228        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
20229          parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection) e.getUnfinishedMessage();
20230          throw e;
20231        } finally {
20232          if (parsedMessage != null) {
20233            mergeFrom(parsedMessage);
20234          }
20235        }
20236        return this;
20237      }
20238
20239      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SnapshotDiffSection)
20240    }
20241
20242    static {
20243      defaultInstance = new SnapshotDiffSection(true);
20244      defaultInstance.initFields();
20245    }
20246
20247    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SnapshotDiffSection)
20248  }
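
  // Note (illustrative, based on the delimited parse helpers generated
  // above): the SnapshotDiffSection message itself carries no fields of its
  // own; in an fsimage stream it is typically followed by length-delimited
  // DiffEntry records, which can be read back with parseDelimitedFrom until
  // it returns null at end of stream.
  //
  //   java.io.InputStream in = ...; // positioned at the section body
  //   FsImageProto.SnapshotDiffSection.DiffEntry e;
  //   while ((e = FsImageProto.SnapshotDiffSection.DiffEntry
  //       .parseDelimitedFrom(in)) != null) {
  //     // process e.getType(), e.getInodeId(), e.getNumOfDiff()
  //   }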
20249
20250  public interface StringTableSectionOrBuilder
20251      extends com.google.protobuf.MessageOrBuilder {
20252
20253    // optional uint32 numEntry = 1;
20254    /**
20255     * <code>optional uint32 numEntry = 1;</code>
20256     *
20257     * <pre>
     * The number of repeated Entry records that follow.
20259     * </pre>
20260     */
20261    boolean hasNumEntry();
20262    /**
20263     * <code>optional uint32 numEntry = 1;</code>
20264     *
20265     * <pre>
     * The number of repeated Entry records that follow.
20267     * </pre>
20268     */
20269    int getNumEntry();
20270  }
20271  /**
20272   * Protobuf type {@code hadoop.hdfs.fsimage.StringTableSection}
20273   *
20274   * <pre>
   * This section maps strings to ids.
20277   * NAME: STRING_TABLE
20278   * </pre>
20279   */
20280  public static final class StringTableSection extends
20281      com.google.protobuf.GeneratedMessage
20282      implements StringTableSectionOrBuilder {
20283    // Use StringTableSection.newBuilder() to construct.
20284    private StringTableSection(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
20285      super(builder);
20286      this.unknownFields = builder.getUnknownFields();
20287    }
20288    private StringTableSection(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
20289
20290    private static final StringTableSection defaultInstance;
20291    public static StringTableSection getDefaultInstance() {
20292      return defaultInstance;
20293    }
20294
20295    public StringTableSection getDefaultInstanceForType() {
20296      return defaultInstance;
20297    }
20298
20299    private final com.google.protobuf.UnknownFieldSet unknownFields;
20300    @java.lang.Override
20301    public final com.google.protobuf.UnknownFieldSet
20302        getUnknownFields() {
20303      return this.unknownFields;
20304    }
20305    private StringTableSection(
20306        com.google.protobuf.CodedInputStream input,
20307        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20308        throws com.google.protobuf.InvalidProtocolBufferException {
20309      initFields();
20310      int mutable_bitField0_ = 0;
20311      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
20312          com.google.protobuf.UnknownFieldSet.newBuilder();
20313      try {
20314        boolean done = false;
20315        while (!done) {
20316          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            case 8: {
              bitField0_ |= 0x00000001;
              numEntry_ = input.readUInt32();
              break;
            }
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          }
20334        }
20335      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
20336        throw e.setUnfinishedMessage(this);
20337      } catch (java.io.IOException e) {
20338        throw new com.google.protobuf.InvalidProtocolBufferException(
20339            e.getMessage()).setUnfinishedMessage(this);
20340      } finally {
20341        this.unknownFields = unknownFields.build();
20342        makeExtensionsImmutable();
20343      }
20344    }
20345    public static final com.google.protobuf.Descriptors.Descriptor
20346        getDescriptor() {
20347      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_descriptor;
20348    }
20349
20350    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
20351        internalGetFieldAccessorTable() {
20352      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_fieldAccessorTable
20353          .ensureFieldAccessorsInitialized(
20354              org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Builder.class);
20355    }
20356
20357    public static com.google.protobuf.Parser<StringTableSection> PARSER =
20358        new com.google.protobuf.AbstractParser<StringTableSection>() {
20359      public StringTableSection parsePartialFrom(
20360          com.google.protobuf.CodedInputStream input,
20361          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20362          throws com.google.protobuf.InvalidProtocolBufferException {
20363        return new StringTableSection(input, extensionRegistry);
20364      }
20365    };
20366
20367    @java.lang.Override
20368    public com.google.protobuf.Parser<StringTableSection> getParserForType() {
20369      return PARSER;
20370    }
20371
20372    public interface EntryOrBuilder
20373        extends com.google.protobuf.MessageOrBuilder {
20374
20375      // optional uint32 id = 1;
20376      /**
20377       * <code>optional uint32 id = 1;</code>
20378       */
20379      boolean hasId();
20380      /**
20381       * <code>optional uint32 id = 1;</code>
20382       */
20383      int getId();
20384
20385      // optional string str = 2;
20386      /**
20387       * <code>optional string str = 2;</code>
20388       */
20389      boolean hasStr();
20390      /**
20391       * <code>optional string str = 2;</code>
20392       */
20393      java.lang.String getStr();
20394      /**
20395       * <code>optional string str = 2;</code>
20396       */
20397      com.google.protobuf.ByteString
20398          getStrBytes();
20399    }
20400    /**
20401     * Protobuf type {@code hadoop.hdfs.fsimage.StringTableSection.Entry}
20402     */
20403    public static final class Entry extends
20404        com.google.protobuf.GeneratedMessage
20405        implements EntryOrBuilder {
20406      // Use Entry.newBuilder() to construct.
20407      private Entry(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
20408        super(builder);
20409        this.unknownFields = builder.getUnknownFields();
20410      }
20411      private Entry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
20412
20413      private static final Entry defaultInstance;
20414      public static Entry getDefaultInstance() {
20415        return defaultInstance;
20416      }
20417
20418      public Entry getDefaultInstanceForType() {
20419        return defaultInstance;
20420      }
20421
20422      private final com.google.protobuf.UnknownFieldSet unknownFields;
20423      @java.lang.Override
20424      public final com.google.protobuf.UnknownFieldSet
20425          getUnknownFields() {
20426        return this.unknownFields;
20427      }
20428      private Entry(
20429          com.google.protobuf.CodedInputStream input,
20430          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20431          throws com.google.protobuf.InvalidProtocolBufferException {
20432        initFields();
20433        int mutable_bitField0_ = 0;
20434        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
20435            com.google.protobuf.UnknownFieldSet.newBuilder();
20436        try {
20437          boolean done = false;
20438          while (!done) {
20439            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                bitField0_ |= 0x00000001;
                id_ = input.readUInt32();
                break;
              }
              case 18: {
                bitField0_ |= 0x00000002;
                str_ = input.readBytes();
                break;
              }
              default: {
                if (!parseUnknownField(input, unknownFields,
                                       extensionRegistry, tag)) {
                  done = true;
                }
                break;
              }
            }
20462          }
20463        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
20464          throw e.setUnfinishedMessage(this);
20465        } catch (java.io.IOException e) {
20466          throw new com.google.protobuf.InvalidProtocolBufferException(
20467              e.getMessage()).setUnfinishedMessage(this);
20468        } finally {
20469          this.unknownFields = unknownFields.build();
20470          makeExtensionsImmutable();
20471        }
20472      }
20473      public static final com.google.protobuf.Descriptors.Descriptor
20474          getDescriptor() {
20475        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_descriptor;
20476      }
20477
20478      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
20479          internalGetFieldAccessorTable() {
20480        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_fieldAccessorTable
20481            .ensureFieldAccessorsInitialized(
20482                org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry.Builder.class);
20483      }
20484
20485      public static com.google.protobuf.Parser<Entry> PARSER =
20486          new com.google.protobuf.AbstractParser<Entry>() {
20487        public Entry parsePartialFrom(
20488            com.google.protobuf.CodedInputStream input,
20489            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20490            throws com.google.protobuf.InvalidProtocolBufferException {
20491          return new Entry(input, extensionRegistry);
20492        }
20493      };
20494
20495      @java.lang.Override
20496      public com.google.protobuf.Parser<Entry> getParserForType() {
20497        return PARSER;
20498      }
20499
20500      private int bitField0_;
20501      // optional uint32 id = 1;
20502      public static final int ID_FIELD_NUMBER = 1;
20503      private int id_;
20504      /**
20505       * <code>optional uint32 id = 1;</code>
20506       */
20507      public boolean hasId() {
20508        return ((bitField0_ & 0x00000001) == 0x00000001);
20509      }
20510      /**
20511       * <code>optional uint32 id = 1;</code>
20512       */
20513      public int getId() {
20514        return id_;
20515      }
20516
20517      // optional string str = 2;
20518      public static final int STR_FIELD_NUMBER = 2;
20519      private java.lang.Object str_;
20520      /**
20521       * <code>optional string str = 2;</code>
20522       */
20523      public boolean hasStr() {
20524        return ((bitField0_ & 0x00000002) == 0x00000002);
20525      }
20526      /**
20527       * <code>optional string str = 2;</code>
20528       */
20529      public java.lang.String getStr() {
20530        java.lang.Object ref = str_;
20531        if (ref instanceof java.lang.String) {
20532          return (java.lang.String) ref;
20533        } else {
20534          com.google.protobuf.ByteString bs = 
20535              (com.google.protobuf.ByteString) ref;
20536          java.lang.String s = bs.toStringUtf8();
20537          if (bs.isValidUtf8()) {
20538            str_ = s;
20539          }
20540          return s;
20541        }
20542      }
20543      /**
20544       * <code>optional string str = 2;</code>
20545       */
20546      public com.google.protobuf.ByteString
20547          getStrBytes() {
20548        java.lang.Object ref = str_;
20549        if (ref instanceof java.lang.String) {
20550          com.google.protobuf.ByteString b = 
20551              com.google.protobuf.ByteString.copyFromUtf8(
20552                  (java.lang.String) ref);
20553          str_ = b;
20554          return b;
20555        } else {
20556          return (com.google.protobuf.ByteString) ref;
20557        }
20558      }
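
      // The two accessors above share one backing field: str_ holds either a
      // String or a ByteString. getStr() decodes a ByteString to UTF-8 and
      // caches the String only when the bytes are valid UTF-8; getStrBytes()
      // does the reverse, caching the encoded form. Either representation is
      // converted at most once per instance.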
20559
20560      private void initFields() {
20561        id_ = 0;
20562        str_ = "";
20563      }
20564      private byte memoizedIsInitialized = -1;
20565      public final boolean isInitialized() {
20566        byte isInitialized = memoizedIsInitialized;
20567        if (isInitialized != -1) return isInitialized == 1;
20568
20569        memoizedIsInitialized = 1;
20570        return true;
20571      }
20572
20573      public void writeTo(com.google.protobuf.CodedOutputStream output)
20574                          throws java.io.IOException {
20575        getSerializedSize();
20576        if (((bitField0_ & 0x00000001) == 0x00000001)) {
20577          output.writeUInt32(1, id_);
20578        }
20579        if (((bitField0_ & 0x00000002) == 0x00000002)) {
20580          output.writeBytes(2, getStrBytes());
20581        }
20582        getUnknownFields().writeTo(output);
20583      }
20584
20585      private int memoizedSerializedSize = -1;
20586      public int getSerializedSize() {
20587        int size = memoizedSerializedSize;
20588        if (size != -1) return size;
20589
20590        size = 0;
20591        if (((bitField0_ & 0x00000001) == 0x00000001)) {
20592          size += com.google.protobuf.CodedOutputStream
20593            .computeUInt32Size(1, id_);
20594        }
20595        if (((bitField0_ & 0x00000002) == 0x00000002)) {
20596          size += com.google.protobuf.CodedOutputStream
20597            .computeBytesSize(2, getStrBytes());
20598        }
20599        size += getUnknownFields().getSerializedSize();
20600        memoizedSerializedSize = size;
20601        return size;
20602      }
20603
20604      private static final long serialVersionUID = 0L;
20605      @java.lang.Override
20606      protected java.lang.Object writeReplace()
20607          throws java.io.ObjectStreamException {
20608        return super.writeReplace();
20609      }
20610
20611      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom(
20612          com.google.protobuf.ByteString data)
20613          throws com.google.protobuf.InvalidProtocolBufferException {
20614        return PARSER.parseFrom(data);
20615      }
20616      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom(
20617          com.google.protobuf.ByteString data,
20618          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20619          throws com.google.protobuf.InvalidProtocolBufferException {
20620        return PARSER.parseFrom(data, extensionRegistry);
20621      }
20622      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom(byte[] data)
20623          throws com.google.protobuf.InvalidProtocolBufferException {
20624        return PARSER.parseFrom(data);
20625      }
20626      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom(
20627          byte[] data,
20628          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20629          throws com.google.protobuf.InvalidProtocolBufferException {
20630        return PARSER.parseFrom(data, extensionRegistry);
20631      }
20632      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom(java.io.InputStream input)
20633          throws java.io.IOException {
20634        return PARSER.parseFrom(input);
20635      }
20636      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom(
20637          java.io.InputStream input,
20638          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20639          throws java.io.IOException {
20640        return PARSER.parseFrom(input, extensionRegistry);
20641      }
20642      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseDelimitedFrom(java.io.InputStream input)
20643          throws java.io.IOException {
20644        return PARSER.parseDelimitedFrom(input);
20645      }
20646      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseDelimitedFrom(
20647          java.io.InputStream input,
20648          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20649          throws java.io.IOException {
20650        return PARSER.parseDelimitedFrom(input, extensionRegistry);
20651      }
20652      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom(
20653          com.google.protobuf.CodedInputStream input)
20654          throws java.io.IOException {
20655        return PARSER.parseFrom(input);
20656      }
20657      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom(
20658          com.google.protobuf.CodedInputStream input,
20659          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20660          throws java.io.IOException {
20661        return PARSER.parseFrom(input, extensionRegistry);
20662      }
20663
20664      public static Builder newBuilder() { return Builder.create(); }
20665      public Builder newBuilderForType() { return newBuilder(); }
20666      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry prototype) {
20667        return newBuilder().mergeFrom(prototype);
20668      }
20669      public Builder toBuilder() { return newBuilder(this); }
20670
20671      @java.lang.Override
20672      protected Builder newBuilderForType(
20673          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
20674        Builder builder = new Builder(parent);
20675        return builder;
20676      }
20677      /**
20678       * Protobuf type {@code hadoop.hdfs.fsimage.StringTableSection.Entry}
20679       */
20680      public static final class Builder extends
20681          com.google.protobuf.GeneratedMessage.Builder<Builder>
20682         implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.EntryOrBuilder {
20683        public static final com.google.protobuf.Descriptors.Descriptor
20684            getDescriptor() {
20685          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_descriptor;
20686        }
20687
20688        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
20689            internalGetFieldAccessorTable() {
20690          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_fieldAccessorTable
20691              .ensureFieldAccessorsInitialized(
20692                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry.Builder.class);
20693        }
20694
20695        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry.newBuilder()
20696        private Builder() {
20697          maybeForceBuilderInitialization();
20698        }
20699
20700        private Builder(
20701            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
20702          super(parent);
20703          maybeForceBuilderInitialization();
20704        }
20705        private void maybeForceBuilderInitialization() {
20706          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
20707          }
20708        }
20709        private static Builder create() {
20710          return new Builder();
20711        }
20712
20713        public Builder clear() {
20714          super.clear();
20715          id_ = 0;
20716          bitField0_ = (bitField0_ & ~0x00000001);
20717          str_ = "";
20718          bitField0_ = (bitField0_ & ~0x00000002);
20719          return this;
20720        }
20721
20722        public Builder clone() {
20723          return create().mergeFrom(buildPartial());
20724        }
20725
20726        public com.google.protobuf.Descriptors.Descriptor
20727            getDescriptorForType() {
20728          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_descriptor;
20729        }
20730
20731        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry getDefaultInstanceForType() {
20732          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry.getDefaultInstance();
20733        }
20734
20735        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry build() {
20736          org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry result = buildPartial();
20737          if (!result.isInitialized()) {
20738            throw newUninitializedMessageException(result);
20739          }
20740          return result;
20741        }
20742
20743        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry buildPartial() {
20744          org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry(this);
20745          int from_bitField0_ = bitField0_;
20746          int to_bitField0_ = 0;
20747          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
20748            to_bitField0_ |= 0x00000001;
20749          }
20750          result.id_ = id_;
20751          if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
20752            to_bitField0_ |= 0x00000002;
20753          }
20754          result.str_ = str_;
20755          result.bitField0_ = to_bitField0_;
20756          onBuilt();
20757          return result;
20758        }
20759
20760        public Builder mergeFrom(com.google.protobuf.Message other) {
20761          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry) {
20762            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry)other);
20763          } else {
20764            super.mergeFrom(other);
20765            return this;
20766          }
20767        }
20768
20769        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry other) {
20770          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry.getDefaultInstance()) return this;
20771          if (other.hasId()) {
20772            setId(other.getId());
20773          }
20774          if (other.hasStr()) {
20775            bitField0_ |= 0x00000002;
20776            str_ = other.str_;
20777            onChanged();
20778          }
20779          this.mergeUnknownFields(other.getUnknownFields());
20780          return this;
20781        }
20782
20783        public final boolean isInitialized() {
20784          return true;
20785        }
20786
20787        public Builder mergeFrom(
20788            com.google.protobuf.CodedInputStream input,
20789            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20790            throws java.io.IOException {
20791          org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parsedMessage = null;
20792          try {
20793            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
20794          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
20795            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry) e.getUnfinishedMessage();
20796            throw e;
20797          } finally {
20798            if (parsedMessage != null) {
20799              mergeFrom(parsedMessage);
20800            }
20801          }
20802          return this;
20803        }
20804        private int bitField0_;
20805
20806        // optional uint32 id = 1;
20807        private int id_ ;
20808        /**
20809         * <code>optional uint32 id = 1;</code>
20810         */
20811        public boolean hasId() {
20812          return ((bitField0_ & 0x00000001) == 0x00000001);
20813        }
20814        /**
20815         * <code>optional uint32 id = 1;</code>
20816         */
20817        public int getId() {
20818          return id_;
20819        }
20820        /**
20821         * <code>optional uint32 id = 1;</code>
20822         */
20823        public Builder setId(int value) {
20824          bitField0_ |= 0x00000001;
20825          id_ = value;
20826          onChanged();
20827          return this;
20828        }
20829        /**
20830         * <code>optional uint32 id = 1;</code>
20831         */
20832        public Builder clearId() {
20833          bitField0_ = (bitField0_ & ~0x00000001);
20834          id_ = 0;
20835          onChanged();
20836          return this;
20837        }
20838
20839        // optional string str = 2;
20840        private java.lang.Object str_ = "";
20841        /**
20842         * <code>optional string str = 2;</code>
20843         */
20844        public boolean hasStr() {
20845          return ((bitField0_ & 0x00000002) == 0x00000002);
20846        }
20847        /**
20848         * <code>optional string str = 2;</code>
20849         */
20850        public java.lang.String getStr() {
20851          java.lang.Object ref = str_;
20852          if (!(ref instanceof java.lang.String)) {
20853            java.lang.String s = ((com.google.protobuf.ByteString) ref)
20854                .toStringUtf8();
20855            str_ = s;
20856            return s;
20857          } else {
20858            return (java.lang.String) ref;
20859          }
20860        }
20861        /**
20862         * <code>optional string str = 2;</code>
20863         */
20864        public com.google.protobuf.ByteString
20865            getStrBytes() {
20866          java.lang.Object ref = str_;
20867          if (ref instanceof String) {
20868            com.google.protobuf.ByteString b = 
20869                com.google.protobuf.ByteString.copyFromUtf8(
20870                    (java.lang.String) ref);
20871            str_ = b;
20872            return b;
20873          } else {
20874            return (com.google.protobuf.ByteString) ref;
20875          }
20876        }
20877        /**
20878         * <code>optional string str = 2;</code>
20879         */
20880        public Builder setStr(
20881            java.lang.String value) {
20882          if (value == null) {
20883            throw new NullPointerException();
20884          }
20885          bitField0_ |= 0x00000002;
20886          str_ = value;
20887          onChanged();
20888          return this;
20889        }
20890        /**
20891         * <code>optional string str = 2;</code>
20892         */
20893        public Builder clearStr() {
20894          bitField0_ = (bitField0_ & ~0x00000002);
20895          str_ = getDefaultInstance().getStr();
20896          onChanged();
20897          return this;
20898        }
20899        /**
20900         * <code>optional string str = 2;</code>
20901         */
20902        public Builder setStrBytes(
20903            com.google.protobuf.ByteString value) {
20904          if (value == null) {
20905            throw new NullPointerException();
20906          }
20907          bitField0_ |= 0x00000002;
20908          str_ = value;
20909          onChanged();
20910          return this;
20911        }
20912
20913        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.StringTableSection.Entry)
20914      }
20915
20916      static {
20917        defaultInstance = new Entry(true);
20918        defaultInstance.initFields();
20919      }
20920
20921      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.StringTableSection.Entry)
20922    }
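
    // Example (illustrative, not protoc output): a minimal sketch of the
    // generated Builder API above; the id and string values are hypothetical.
    //
    //   FsImageProto.StringTableSection.Entry entry =
    //       FsImageProto.StringTableSection.Entry.newBuilder()
    //           .setId(1)
    //           .setStr("supergroup")
    //           .build();
    //   byte[] wire = entry.toByteArray();
    //   FsImageProto.StringTableSection.Entry parsed =
    //       FsImageProto.StringTableSection.Entry.parseFrom(wire);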
20923
20924    private int bitField0_;
20925    // optional uint32 numEntry = 1;
20926    public static final int NUMENTRY_FIELD_NUMBER = 1;
20927    private int numEntry_;
20928    /**
20929     * <code>optional uint32 numEntry = 1;</code>
20930     *
20931     * <pre>
20932     * repeated Entry
20933     * </pre>
20934     */
20935    public boolean hasNumEntry() {
20936      return ((bitField0_ & 0x00000001) == 0x00000001);
20937    }
20938    /**
20939     * <code>optional uint32 numEntry = 1;</code>
20940     *
20941     * <pre>
20942     * repeated Entry
20943     * </pre>
20944     */
20945    public int getNumEntry() {
20946      return numEntry_;
20947    }
20948
20949    private void initFields() {
20950      numEntry_ = 0;
20951    }
20952    private byte memoizedIsInitialized = -1;
20953    public final boolean isInitialized() {
20954      byte isInitialized = memoizedIsInitialized;
20955      if (isInitialized != -1) return isInitialized == 1;
20956
20957      memoizedIsInitialized = 1;
20958      return true;
20959    }
20960
20961    public void writeTo(com.google.protobuf.CodedOutputStream output)
20962                        throws java.io.IOException {
20963      getSerializedSize();
20964      if (((bitField0_ & 0x00000001) == 0x00000001)) {
20965        output.writeUInt32(1, numEntry_);
20966      }
20967      getUnknownFields().writeTo(output);
20968    }
20969
20970    private int memoizedSerializedSize = -1;
20971    public int getSerializedSize() {
20972      int size = memoizedSerializedSize;
20973      if (size != -1) return size;
20974
20975      size = 0;
20976      if (((bitField0_ & 0x00000001) == 0x00000001)) {
20977        size += com.google.protobuf.CodedOutputStream
20978          .computeUInt32Size(1, numEntry_);
20979      }
20980      size += getUnknownFields().getSerializedSize();
20981      memoizedSerializedSize = size;
20982      return size;
20983    }
20984
20985    private static final long serialVersionUID = 0L;
20986    @java.lang.Override
20987    protected java.lang.Object writeReplace()
20988        throws java.io.ObjectStreamException {
20989      return super.writeReplace();
20990    }
20991
20992    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom(
20993        com.google.protobuf.ByteString data)
20994        throws com.google.protobuf.InvalidProtocolBufferException {
20995      return PARSER.parseFrom(data);
20996    }
20997    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom(
20998        com.google.protobuf.ByteString data,
20999        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21000        throws com.google.protobuf.InvalidProtocolBufferException {
21001      return PARSER.parseFrom(data, extensionRegistry);
21002    }
21003    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom(byte[] data)
21004        throws com.google.protobuf.InvalidProtocolBufferException {
21005      return PARSER.parseFrom(data);
21006    }
21007    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom(
21008        byte[] data,
21009        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21010        throws com.google.protobuf.InvalidProtocolBufferException {
21011      return PARSER.parseFrom(data, extensionRegistry);
21012    }
21013    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom(java.io.InputStream input)
21014        throws java.io.IOException {
21015      return PARSER.parseFrom(input);
21016    }
21017    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom(
21018        java.io.InputStream input,
21019        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21020        throws java.io.IOException {
21021      return PARSER.parseFrom(input, extensionRegistry);
21022    }
21023    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseDelimitedFrom(java.io.InputStream input)
21024        throws java.io.IOException {
21025      return PARSER.parseDelimitedFrom(input);
21026    }
21027    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseDelimitedFrom(
21028        java.io.InputStream input,
21029        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21030        throws java.io.IOException {
21031      return PARSER.parseDelimitedFrom(input, extensionRegistry);
21032    }
21033    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom(
21034        com.google.protobuf.CodedInputStream input)
21035        throws java.io.IOException {
21036      return PARSER.parseFrom(input);
21037    }
21038    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom(
21039        com.google.protobuf.CodedInputStream input,
21040        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21041        throws java.io.IOException {
21042      return PARSER.parseFrom(input, extensionRegistry);
21043    }
21044
21045    public static Builder newBuilder() { return Builder.create(); }
21046    public Builder newBuilderForType() { return newBuilder(); }
21047    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection prototype) {
21048      return newBuilder().mergeFrom(prototype);
21049    }
21050    public Builder toBuilder() { return newBuilder(this); }
21051
21052    @java.lang.Override
21053    protected Builder newBuilderForType(
21054        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
21055      Builder builder = new Builder(parent);
21056      return builder;
21057    }
21058    /**
21059     * Protobuf type {@code hadoop.hdfs.fsimage.StringTableSection}
21060     *
21061     * <pre>
21062     **
21063     * This section maps strings to ids
21064     * NAME: STRING_TABLE
21065     * </pre>
21066     */
21067    public static final class Builder extends
21068        com.google.protobuf.GeneratedMessage.Builder<Builder>
21069       implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSectionOrBuilder {
21070      public static final com.google.protobuf.Descriptors.Descriptor
21071          getDescriptor() {
21072        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_descriptor;
21073      }
21074
21075      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
21076          internalGetFieldAccessorTable() {
21077        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_fieldAccessorTable
21078            .ensureFieldAccessorsInitialized(
21079                org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Builder.class);
21080      }
21081
21082      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.newBuilder()
21083      private Builder() {
21084        maybeForceBuilderInitialization();
21085      }
21086
21087      private Builder(
21088          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
21089        super(parent);
21090        maybeForceBuilderInitialization();
21091      }
21092      private void maybeForceBuilderInitialization() {
21093        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
21094        }
21095      }
21096      private static Builder create() {
21097        return new Builder();
21098      }
21099
21100      public Builder clear() {
21101        super.clear();
21102        numEntry_ = 0;
21103        bitField0_ = (bitField0_ & ~0x00000001);
21104        return this;
21105      }
21106
21107      public Builder clone() {
21108        return create().mergeFrom(buildPartial());
21109      }
21110
21111      public com.google.protobuf.Descriptors.Descriptor
21112          getDescriptorForType() {
21113        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_descriptor;
21114      }
21115
21116      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection getDefaultInstanceForType() {
21117        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.getDefaultInstance();
21118      }
21119
21120      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection build() {
21121        org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection result = buildPartial();
21122        if (!result.isInitialized()) {
21123          throw newUninitializedMessageException(result);
21124        }
21125        return result;
21126      }
21127
21128      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection buildPartial() {
21129        org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection(this);
21130        int from_bitField0_ = bitField0_;
21131        int to_bitField0_ = 0;
21132        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
21133          to_bitField0_ |= 0x00000001;
21134        }
21135        result.numEntry_ = numEntry_;
21136        result.bitField0_ = to_bitField0_;
21137        onBuilt();
21138        return result;
21139      }
21140
21141      public Builder mergeFrom(com.google.protobuf.Message other) {
21142        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection) {
21143          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection)other);
21144        } else {
21145          super.mergeFrom(other);
21146          return this;
21147        }
21148      }
21149
21150      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection other) {
21151        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.getDefaultInstance()) return this;
21152        if (other.hasNumEntry()) {
21153          setNumEntry(other.getNumEntry());
21154        }
21155        this.mergeUnknownFields(other.getUnknownFields());
21156        return this;
21157      }
21158
21159      public final boolean isInitialized() {
21160        return true;
21161      }
21162
21163      public Builder mergeFrom(
21164          com.google.protobuf.CodedInputStream input,
21165          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21166          throws java.io.IOException {
21167        org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parsedMessage = null;
21168        try {
21169          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
21170        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
21171          parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection) e.getUnfinishedMessage();
21172          throw e;
21173        } finally {
21174          if (parsedMessage != null) {
21175            mergeFrom(parsedMessage);
21176          }
21177        }
21178        return this;
21179      }
21180      private int bitField0_;
21181
21182      // optional uint32 numEntry = 1;
21183      private int numEntry_ ;
21184      /**
21185       * <code>optional uint32 numEntry = 1;</code>
21186       *
21187       * <pre>
21188       * repeated Entry
21189       * </pre>
21190       */
21191      public boolean hasNumEntry() {
21192        return ((bitField0_ & 0x00000001) == 0x00000001);
21193      }
21194      /**
21195       * <code>optional uint32 numEntry = 1;</code>
21196       *
21197       * <pre>
21198       * repeated Entry
21199       * </pre>
21200       */
21201      public int getNumEntry() {
21202        return numEntry_;
21203      }
21204      /**
21205       * <code>optional uint32 numEntry = 1;</code>
21206       *
21207       * <pre>
21208       * repeated Entry
21209       * </pre>
21210       */
21211      public Builder setNumEntry(int value) {
21212        bitField0_ |= 0x00000001;
21213        numEntry_ = value;
21214        onChanged();
21215        return this;
21216      }
21217      /**
21218       * <code>optional uint32 numEntry = 1;</code>
21219       *
21220       * <pre>
21221       * repeated Entry
21222       * </pre>
21223       */
21224      public Builder clearNumEntry() {
21225        bitField0_ = (bitField0_ & ~0x00000001);
21226        numEntry_ = 0;
21227        onChanged();
21228        return this;
21229      }
21230
21231      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.StringTableSection)
21232    }
21233
21234    static {
21235      defaultInstance = new StringTableSection(true);
21236      defaultInstance.initFields();
21237    }
21238
21239    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.StringTableSection)
21240  }
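
  // Example (illustrative, not protoc output): one way a reader might
  // rebuild the string table, assuming the layout implied by the
  // "repeated Entry" comment on numEntry: a section header followed on
  // the same stream by getNumEntry() length-delimited Entry messages.
  // The InputStream `in` is hypothetical.
  //
  //   java.util.Map<Integer, String> strings =
  //       new java.util.HashMap<Integer, String>();
  //   StringTableSection header = StringTableSection.parseDelimitedFrom(in);
  //   for (int i = 0; i < header.getNumEntry(); ++i) {
  //     StringTableSection.Entry e =
  //         StringTableSection.Entry.parseDelimitedFrom(in);
  //     strings.put(e.getId(), e.getStr());
  //   }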
21241
21242  public interface SecretManagerSectionOrBuilder
21243      extends com.google.protobuf.MessageOrBuilder {
21244
21245    // optional uint32 currentId = 1;
21246    /**
21247     * <code>optional uint32 currentId = 1;</code>
21248     */
21249    boolean hasCurrentId();
21250    /**
21251     * <code>optional uint32 currentId = 1;</code>
21252     */
21253    int getCurrentId();
21254
21255    // optional uint32 tokenSequenceNumber = 2;
21256    /**
21257     * <code>optional uint32 tokenSequenceNumber = 2;</code>
21258     */
21259    boolean hasTokenSequenceNumber();
21260    /**
21261     * <code>optional uint32 tokenSequenceNumber = 2;</code>
21262     */
21263    int getTokenSequenceNumber();
21264
21265    // optional uint32 numKeys = 3;
21266    /**
21267     * <code>optional uint32 numKeys = 3;</code>
21268     */
21269    boolean hasNumKeys();
21270    /**
21271     * <code>optional uint32 numKeys = 3;</code>
21272     */
21273    int getNumKeys();
21274
21275    // optional uint32 numTokens = 4;
21276    /**
21277     * <code>optional uint32 numTokens = 4;</code>
21278     *
21279     * <pre>
21280     * repeated DelegationKey keys
21281     * repeated PersistToken tokens
21282     * </pre>
21283     */
21284    boolean hasNumTokens();
21285    /**
21286     * <code>optional uint32 numTokens = 4;</code>
21287     *
21288     * <pre>
21289     * repeated DelegationKey keys
21290     * repeated PersistToken tokens
21291     * </pre>
21292     */
21293    int getNumTokens();
21294  }
21295  /**
21296   * Protobuf type {@code hadoop.hdfs.fsimage.SecretManagerSection}
21297   */
21298  public static final class SecretManagerSection extends
21299      com.google.protobuf.GeneratedMessage
21300      implements SecretManagerSectionOrBuilder {
21301    // Use SecretManagerSection.newBuilder() to construct.
21302    private SecretManagerSection(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
21303      super(builder);
21304      this.unknownFields = builder.getUnknownFields();
21305    }
21306    private SecretManagerSection(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
21307
21308    private static final SecretManagerSection defaultInstance;
21309    public static SecretManagerSection getDefaultInstance() {
21310      return defaultInstance;
21311    }
21312
21313    public SecretManagerSection getDefaultInstanceForType() {
21314      return defaultInstance;
21315    }
21316
21317    private final com.google.protobuf.UnknownFieldSet unknownFields;
21318    @java.lang.Override
21319    public final com.google.protobuf.UnknownFieldSet
21320        getUnknownFields() {
21321      return this.unknownFields;
21322    }
21323    private SecretManagerSection(
21324        com.google.protobuf.CodedInputStream input,
21325        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21326        throws com.google.protobuf.InvalidProtocolBufferException {
21327      initFields();
21328      int mutable_bitField0_ = 0;
21329      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
21330          com.google.protobuf.UnknownFieldSet.newBuilder();
21331      try {
21332        boolean done = false;
21333        while (!done) {
21334          int tag = input.readTag();
21335          switch (tag) {
21336            case 0:
21337              done = true;
21338              break;
21339            default: {
21340              if (!parseUnknownField(input, unknownFields,
21341                                     extensionRegistry, tag)) {
21342                done = true;
21343              }
21344              break;
21345            }
21346            case 8: {
21347              bitField0_ |= 0x00000001;
21348              currentId_ = input.readUInt32();
21349              break;
21350            }
21351            case 16: {
21352              bitField0_ |= 0x00000002;
21353              tokenSequenceNumber_ = input.readUInt32();
21354              break;
21355            }
21356            case 24: {
21357              bitField0_ |= 0x00000004;
21358              numKeys_ = input.readUInt32();
21359              break;
21360            }
21361            case 32: {
21362              bitField0_ |= 0x00000008;
21363              numTokens_ = input.readUInt32();
21364              break;
21365            }
21366          }
21367        }
21368      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
21369        throw e.setUnfinishedMessage(this);
21370      } catch (java.io.IOException e) {
21371        throw new com.google.protobuf.InvalidProtocolBufferException(
21372            e.getMessage()).setUnfinishedMessage(this);
21373      } finally {
21374        this.unknownFields = unknownFields.build();
21375        makeExtensionsImmutable();
21376      }
21377    }
21378    public static final com.google.protobuf.Descriptors.Descriptor
21379        getDescriptor() {
21380      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor;
21381    }
21382
21383    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
21384        internalGetFieldAccessorTable() {
21385      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_fieldAccessorTable
21386          .ensureFieldAccessorsInitialized(
21387              org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.Builder.class);
21388    }
21389
21390    public static com.google.protobuf.Parser<SecretManagerSection> PARSER =
21391        new com.google.protobuf.AbstractParser<SecretManagerSection>() {
21392      public SecretManagerSection parsePartialFrom(
21393          com.google.protobuf.CodedInputStream input,
21394          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21395          throws com.google.protobuf.InvalidProtocolBufferException {
21396        return new SecretManagerSection(input, extensionRegistry);
21397      }
21398    };
21399
21400    @java.lang.Override
21401    public com.google.protobuf.Parser<SecretManagerSection> getParserForType() {
21402      return PARSER;
21403    }
21404
21405    public interface DelegationKeyOrBuilder
21406        extends com.google.protobuf.MessageOrBuilder {
21407
21408      // optional uint32 id = 1;
21409      /**
21410       * <code>optional uint32 id = 1;</code>
21411       */
21412      boolean hasId();
21413      /**
21414       * <code>optional uint32 id = 1;</code>
21415       */
21416      int getId();
21417
21418      // optional uint64 expiryDate = 2;
21419      /**
21420       * <code>optional uint64 expiryDate = 2;</code>
21421       */
21422      boolean hasExpiryDate();
21423      /**
21424       * <code>optional uint64 expiryDate = 2;</code>
21425       */
21426      long getExpiryDate();
21427
21428      // optional bytes key = 3;
21429      /**
21430       * <code>optional bytes key = 3;</code>
21431       */
21432      boolean hasKey();
21433      /**
21434       * <code>optional bytes key = 3;</code>
21435       */
21436      com.google.protobuf.ByteString getKey();
21437    }
21438    /**
21439     * Protobuf type {@code hadoop.hdfs.fsimage.SecretManagerSection.DelegationKey}
21440     */
21441    public static final class DelegationKey extends
21442        com.google.protobuf.GeneratedMessage
21443        implements DelegationKeyOrBuilder {
21444      // Use DelegationKey.newBuilder() to construct.
21445      private DelegationKey(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
21446        super(builder);
21447        this.unknownFields = builder.getUnknownFields();
21448      }
21449      private DelegationKey(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
21450
21451      private static final DelegationKey defaultInstance;
21452      public static DelegationKey getDefaultInstance() {
21453        return defaultInstance;
21454      }
21455
21456      public DelegationKey getDefaultInstanceForType() {
21457        return defaultInstance;
21458      }
21459
21460      private final com.google.protobuf.UnknownFieldSet unknownFields;
21461      @java.lang.Override
21462      public final com.google.protobuf.UnknownFieldSet
21463          getUnknownFields() {
21464        return this.unknownFields;
21465      }
21466      private DelegationKey(
21467          com.google.protobuf.CodedInputStream input,
21468          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21469          throws com.google.protobuf.InvalidProtocolBufferException {
21470        initFields();
21471        int mutable_bitField0_ = 0;
21472        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
21473            com.google.protobuf.UnknownFieldSet.newBuilder();
21474        try {
21475          boolean done = false;
21476          while (!done) {
21477            int tag = input.readTag();
21478            switch (tag) {
21479              case 0:
21480                done = true;
21481                break;
21482              default: {
21483                if (!parseUnknownField(input, unknownFields,
21484                                       extensionRegistry, tag)) {
21485                  done = true;
21486                }
21487                break;
21488              }
21489              case 8: {
21490                bitField0_ |= 0x00000001;
21491                id_ = input.readUInt32();
21492                break;
21493              }
21494              case 16: {
21495                bitField0_ |= 0x00000002;
21496                expiryDate_ = input.readUInt64();
21497                break;
21498              }
21499              case 26: {
21500                bitField0_ |= 0x00000004;
21501                key_ = input.readBytes();
21502                break;
21503              }
21504            }
21505          }
21506        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
21507          throw e.setUnfinishedMessage(this);
21508        } catch (java.io.IOException e) {
21509          throw new com.google.protobuf.InvalidProtocolBufferException(
21510              e.getMessage()).setUnfinishedMessage(this);
21511        } finally {
21512          this.unknownFields = unknownFields.build();
21513          makeExtensionsImmutable();
21514        }
21515      }
21516      public static final com.google.protobuf.Descriptors.Descriptor
21517          getDescriptor() {
21518        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_descriptor;
21519      }
21520
21521      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
21522          internalGetFieldAccessorTable() {
21523        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_fieldAccessorTable
21524            .ensureFieldAccessorsInitialized(
21525                org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey.Builder.class);
21526      }
21527
21528      public static com.google.protobuf.Parser<DelegationKey> PARSER =
21529          new com.google.protobuf.AbstractParser<DelegationKey>() {
21530        public DelegationKey parsePartialFrom(
21531            com.google.protobuf.CodedInputStream input,
21532            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21533            throws com.google.protobuf.InvalidProtocolBufferException {
21534          return new DelegationKey(input, extensionRegistry);
21535        }
21536      };
21537
21538      @java.lang.Override
21539      public com.google.protobuf.Parser<DelegationKey> getParserForType() {
21540        return PARSER;
21541      }
21542
21543      private int bitField0_;
21544      // optional uint32 id = 1;
21545      public static final int ID_FIELD_NUMBER = 1;
21546      private int id_;
21547      /**
21548       * <code>optional uint32 id = 1;</code>
21549       */
21550      public boolean hasId() {
21551        return ((bitField0_ & 0x00000001) == 0x00000001);
21552      }
21553      /**
21554       * <code>optional uint32 id = 1;</code>
21555       */
21556      public int getId() {
21557        return id_;
21558      }
21559
21560      // optional uint64 expiryDate = 2;
21561      public static final int EXPIRYDATE_FIELD_NUMBER = 2;
21562      private long expiryDate_;
21563      /**
21564       * <code>optional uint64 expiryDate = 2;</code>
21565       */
21566      public boolean hasExpiryDate() {
21567        return ((bitField0_ & 0x00000002) == 0x00000002);
21568      }
21569      /**
21570       * <code>optional uint64 expiryDate = 2;</code>
21571       */
21572      public long getExpiryDate() {
21573        return expiryDate_;
21574      }
21575
21576      // optional bytes key = 3;
21577      public static final int KEY_FIELD_NUMBER = 3;
21578      private com.google.protobuf.ByteString key_;
21579      /**
21580       * <code>optional bytes key = 3;</code>
21581       */
21582      public boolean hasKey() {
21583        return ((bitField0_ & 0x00000004) == 0x00000004);
21584      }
21585      /**
21586       * <code>optional bytes key = 3;</code>
21587       */
21588      public com.google.protobuf.ByteString getKey() {
21589        return key_;
21590      }
21591
21592      private void initFields() {
21593        id_ = 0;
21594        expiryDate_ = 0L;
21595        key_ = com.google.protobuf.ByteString.EMPTY;
21596      }
21597      private byte memoizedIsInitialized = -1;
21598      public final boolean isInitialized() {
21599        byte isInitialized = memoizedIsInitialized;
21600        if (isInitialized != -1) return isInitialized == 1;
21601
21602        memoizedIsInitialized = 1;
21603        return true;
21604      }
21605
21606      public void writeTo(com.google.protobuf.CodedOutputStream output)
21607                          throws java.io.IOException {
21608        getSerializedSize();
21609        if (((bitField0_ & 0x00000001) == 0x00000001)) {
21610          output.writeUInt32(1, id_);
21611        }
21612        if (((bitField0_ & 0x00000002) == 0x00000002)) {
21613          output.writeUInt64(2, expiryDate_);
21614        }
21615        if (((bitField0_ & 0x00000004) == 0x00000004)) {
21616          output.writeBytes(3, key_);
21617        }
21618        getUnknownFields().writeTo(output);
21619      }
21620
21621      private int memoizedSerializedSize = -1;
21622      public int getSerializedSize() {
21623        int size = memoizedSerializedSize;
21624        if (size != -1) return size;
21625
21626        size = 0;
21627        if (((bitField0_ & 0x00000001) == 0x00000001)) {
21628          size += com.google.protobuf.CodedOutputStream
21629            .computeUInt32Size(1, id_);
21630        }
21631        if (((bitField0_ & 0x00000002) == 0x00000002)) {
21632          size += com.google.protobuf.CodedOutputStream
21633            .computeUInt64Size(2, expiryDate_);
21634        }
21635        if (((bitField0_ & 0x00000004) == 0x00000004)) {
21636          size += com.google.protobuf.CodedOutputStream
21637            .computeBytesSize(3, key_);
21638        }
21639        size += getUnknownFields().getSerializedSize();
21640        memoizedSerializedSize = size;
21641        return size;
21642      }
21643
21644      private static final long serialVersionUID = 0L;
21645      @java.lang.Override
21646      protected java.lang.Object writeReplace()
21647          throws java.io.ObjectStreamException {
21648        return super.writeReplace();
21649      }
21650
21651      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom(
21652          com.google.protobuf.ByteString data)
21653          throws com.google.protobuf.InvalidProtocolBufferException {
21654        return PARSER.parseFrom(data);
21655      }
21656      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom(
21657          com.google.protobuf.ByteString data,
21658          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21659          throws com.google.protobuf.InvalidProtocolBufferException {
21660        return PARSER.parseFrom(data, extensionRegistry);
21661      }
21662      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom(byte[] data)
21663          throws com.google.protobuf.InvalidProtocolBufferException {
21664        return PARSER.parseFrom(data);
21665      }
21666      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom(
21667          byte[] data,
21668          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21669          throws com.google.protobuf.InvalidProtocolBufferException {
21670        return PARSER.parseFrom(data, extensionRegistry);
21671      }
21672      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom(java.io.InputStream input)
21673          throws java.io.IOException {
21674        return PARSER.parseFrom(input);
21675      }
21676      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom(
21677          java.io.InputStream input,
21678          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21679          throws java.io.IOException {
21680        return PARSER.parseFrom(input, extensionRegistry);
21681      }
21682      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseDelimitedFrom(java.io.InputStream input)
21683          throws java.io.IOException {
21684        return PARSER.parseDelimitedFrom(input);
21685      }
21686      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseDelimitedFrom(
21687          java.io.InputStream input,
21688          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21689          throws java.io.IOException {
21690        return PARSER.parseDelimitedFrom(input, extensionRegistry);
21691      }
21692      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom(
21693          com.google.protobuf.CodedInputStream input)
21694          throws java.io.IOException {
21695        return PARSER.parseFrom(input);
21696      }
21697      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom(
21698          com.google.protobuf.CodedInputStream input,
21699          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21700          throws java.io.IOException {
21701        return PARSER.parseFrom(input, extensionRegistry);
21702      }
21703
21704      public static Builder newBuilder() { return Builder.create(); }
21705      public Builder newBuilderForType() { return newBuilder(); }
21706      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey prototype) {
21707        return newBuilder().mergeFrom(prototype);
21708      }
21709      public Builder toBuilder() { return newBuilder(this); }
21710
21711      @java.lang.Override
21712      protected Builder newBuilderForType(
21713          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
21714        Builder builder = new Builder(parent);
21715        return builder;
21716      }
21717      /**
21718       * Protobuf type {@code hadoop.hdfs.fsimage.SecretManagerSection.DelegationKey}
21719       */
21720      public static final class Builder extends
21721          com.google.protobuf.GeneratedMessage.Builder<Builder>
21722         implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKeyOrBuilder {
21723        public static final com.google.protobuf.Descriptors.Descriptor
21724            getDescriptor() {
21725          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_descriptor;
21726        }
21727
21728        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
21729            internalGetFieldAccessorTable() {
21730          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_fieldAccessorTable
21731              .ensureFieldAccessorsInitialized(
21732                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey.Builder.class);
21733        }
21734
21735        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey.newBuilder()
21736        private Builder() {
21737          maybeForceBuilderInitialization();
21738        }
21739
21740        private Builder(
21741            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
21742          super(parent);
21743          maybeForceBuilderInitialization();
21744        }
21745        private void maybeForceBuilderInitialization() {
21746          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
21747          }
21748        }
21749        private static Builder create() {
21750          return new Builder();
21751        }
21752
21753        public Builder clear() {
21754          super.clear();
21755          id_ = 0;
21756          bitField0_ = (bitField0_ & ~0x00000001);
21757          expiryDate_ = 0L;
21758          bitField0_ = (bitField0_ & ~0x00000002);
21759          key_ = com.google.protobuf.ByteString.EMPTY;
21760          bitField0_ = (bitField0_ & ~0x00000004);
21761          return this;
21762        }
21763
21764        public Builder clone() {
21765          return create().mergeFrom(buildPartial());
21766        }
21767
21768        public com.google.protobuf.Descriptors.Descriptor
21769            getDescriptorForType() {
21770          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_descriptor;
21771        }
21772
21773        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey getDefaultInstanceForType() {
21774          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey.getDefaultInstance();
21775        }
21776
21777        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey build() {
21778          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey result = buildPartial();
21779          if (!result.isInitialized()) {
21780            throw newUninitializedMessageException(result);
21781          }
21782          return result;
21783        }
21784
21785        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey buildPartial() {
21786          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey(this);
21787          int from_bitField0_ = bitField0_;
21788          int to_bitField0_ = 0;
21789          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
21790            to_bitField0_ |= 0x00000001;
21791          }
21792          result.id_ = id_;
21793          if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
21794            to_bitField0_ |= 0x00000002;
21795          }
21796          result.expiryDate_ = expiryDate_;
21797          if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
21798            to_bitField0_ |= 0x00000004;
21799          }
21800          result.key_ = key_;
21801          result.bitField0_ = to_bitField0_;
21802          onBuilt();
21803          return result;
21804        }
21805
21806        public Builder mergeFrom(com.google.protobuf.Message other) {
21807          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey) {
21808            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey)other);
21809          } else {
21810            super.mergeFrom(other);
21811            return this;
21812          }
21813        }
21814
21815        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey other) {
21816          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey.getDefaultInstance()) return this;
21817          if (other.hasId()) {
21818            setId(other.getId());
21819          }
21820          if (other.hasExpiryDate()) {
21821            setExpiryDate(other.getExpiryDate());
21822          }
21823          if (other.hasKey()) {
21824            setKey(other.getKey());
21825          }
21826          this.mergeUnknownFields(other.getUnknownFields());
21827          return this;
21828        }
21829
21830        public final boolean isInitialized() {
21831          return true;
21832        }
21833
21834        public Builder mergeFrom(
21835            com.google.protobuf.CodedInputStream input,
21836            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21837            throws java.io.IOException {
21838          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parsedMessage = null;
21839          try {
21840            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
21841          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
21842            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey) e.getUnfinishedMessage();
21843            throw e;
21844          } finally {
21845            if (parsedMessage != null) {
21846              mergeFrom(parsedMessage);
21847            }
21848          }
21849          return this;
21850        }
21851        private int bitField0_;
21852
21853        // optional uint32 id = 1;
21854        private int id_ ;
21855        /**
21856         * <code>optional uint32 id = 1;</code>
21857         */
21858        public boolean hasId() {
21859          return ((bitField0_ & 0x00000001) == 0x00000001);
21860        }
21861        /**
21862         * <code>optional uint32 id = 1;</code>
21863         */
21864        public int getId() {
21865          return id_;
21866        }
21867        /**
21868         * <code>optional uint32 id = 1;</code>
21869         */
21870        public Builder setId(int value) {
21871          bitField0_ |= 0x00000001;
21872          id_ = value;
21873          onChanged();
21874          return this;
21875        }
21876        /**
21877         * <code>optional uint32 id = 1;</code>
21878         */
21879        public Builder clearId() {
21880          bitField0_ = (bitField0_ & ~0x00000001);
21881          id_ = 0;
21882          onChanged();
21883          return this;
21884        }
21885
21886        // optional uint64 expiryDate = 2;
21887        private long expiryDate_ ;
21888        /**
21889         * <code>optional uint64 expiryDate = 2;</code>
21890         */
21891        public boolean hasExpiryDate() {
21892          return ((bitField0_ & 0x00000002) == 0x00000002);
21893        }
21894        /**
21895         * <code>optional uint64 expiryDate = 2;</code>
21896         */
21897        public long getExpiryDate() {
21898          return expiryDate_;
21899        }
21900        /**
21901         * <code>optional uint64 expiryDate = 2;</code>
21902         */
21903        public Builder setExpiryDate(long value) {
21904          bitField0_ |= 0x00000002;
21905          expiryDate_ = value;
21906          onChanged();
21907          return this;
21908        }
21909        /**
21910         * <code>optional uint64 expiryDate = 2;</code>
21911         */
21912        public Builder clearExpiryDate() {
21913          bitField0_ = (bitField0_ & ~0x00000002);
21914          expiryDate_ = 0L;
21915          onChanged();
21916          return this;
21917        }
21918
21919        // optional bytes key = 3;
21920        private com.google.protobuf.ByteString key_ = com.google.protobuf.ByteString.EMPTY;
21921        /**
21922         * <code>optional bytes key = 3;</code>
21923         */
21924        public boolean hasKey() {
21925          return ((bitField0_ & 0x00000004) == 0x00000004);
21926        }
21927        /**
21928         * <code>optional bytes key = 3;</code>
21929         */
21930        public com.google.protobuf.ByteString getKey() {
21931          return key_;
21932        }
21933        /**
21934         * <code>optional bytes key = 3;</code>
21935         */
21936        public Builder setKey(com.google.protobuf.ByteString value) {
21937          if (value == null) {
21938            throw new NullPointerException();
21939          }
21940          bitField0_ |= 0x00000004;
21941          key_ = value;
21942          onChanged();
21943          return this;
21944        }
21945        /**
21946         * <code>optional bytes key = 3;</code>
21947         */
21948        public Builder clearKey() {
21949          bitField0_ = (bitField0_ & ~0x00000004);
21950          key_ = getDefaultInstance().getKey();
21951          onChanged();
21952          return this;
21953        }
21954
21955        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SecretManagerSection.DelegationKey)
21956      }
21957
21958      static {
21959        defaultInstance = new DelegationKey(true);
21960        defaultInstance.initFields();
21961      }
21962
21963      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SecretManagerSection.DelegationKey)
21964    }
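
    // Example (illustrative, not protoc output): optional fields track
    // presence in bitField0_, so has*() distinguishes "never set" from a
    // value that merely equals the default.
    //
    //   SecretManagerSection.DelegationKey.Builder b =
    //       SecretManagerSection.DelegationKey.newBuilder();
    //   boolean before = b.hasExpiryDate();  // false: getExpiryDate() == 0L
    //   b.setExpiryDate(0L);                 // set explicitly to the default
    //   boolean after = b.hasExpiryDate();   // true: presence bit is now on
    //   b.clearExpiryDate();                 // back to unset, value reset to 0L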
21965
21966    public interface PersistTokenOrBuilder
21967        extends com.google.protobuf.MessageOrBuilder {
21968
21969      // optional uint32 version = 1;
21970      /**
21971       * <code>optional uint32 version = 1;</code>
21972       */
21973      boolean hasVersion();
21974      /**
21975       * <code>optional uint32 version = 1;</code>
21976       */
21977      int getVersion();
21978
21979      // optional string owner = 2;
21980      /**
21981       * <code>optional string owner = 2;</code>
21982       */
21983      boolean hasOwner();
21984      /**
21985       * <code>optional string owner = 2;</code>
21986       */
21987      java.lang.String getOwner();
21988      /**
21989       * <code>optional string owner = 2;</code>
21990       */
21991      com.google.protobuf.ByteString
21992          getOwnerBytes();
21993
21994      // optional string renewer = 3;
21995      /**
21996       * <code>optional string renewer = 3;</code>
21997       */
21998      boolean hasRenewer();
21999      /**
22000       * <code>optional string renewer = 3;</code>
22001       */
22002      java.lang.String getRenewer();
22003      /**
22004       * <code>optional string renewer = 3;</code>
22005       */
22006      com.google.protobuf.ByteString
22007          getRenewerBytes();
22008
22009      // optional string realUser = 4;
22010      /**
22011       * <code>optional string realUser = 4;</code>
22012       */
22013      boolean hasRealUser();
22014      /**
22015       * <code>optional string realUser = 4;</code>
22016       */
22017      java.lang.String getRealUser();
22018      /**
22019       * <code>optional string realUser = 4;</code>
22020       */
22021      com.google.protobuf.ByteString
22022          getRealUserBytes();
22023
22024      // optional uint64 issueDate = 5;
22025      /**
22026       * <code>optional uint64 issueDate = 5;</code>
22027       */
22028      boolean hasIssueDate();
22029      /**
22030       * <code>optional uint64 issueDate = 5;</code>
22031       */
22032      long getIssueDate();
22033
22034      // optional uint64 maxDate = 6;
22035      /**
22036       * <code>optional uint64 maxDate = 6;</code>
22037       */
22038      boolean hasMaxDate();
22039      /**
22040       * <code>optional uint64 maxDate = 6;</code>
22041       */
22042      long getMaxDate();
22043
22044      // optional uint32 sequenceNumber = 7;
22045      /**
22046       * <code>optional uint32 sequenceNumber = 7;</code>
22047       */
22048      boolean hasSequenceNumber();
22049      /**
22050       * <code>optional uint32 sequenceNumber = 7;</code>
22051       */
22052      int getSequenceNumber();
22053
22054      // optional uint32 masterKeyId = 8;
22055      /**
22056       * <code>optional uint32 masterKeyId = 8;</code>
22057       */
22058      boolean hasMasterKeyId();
22059      /**
22060       * <code>optional uint32 masterKeyId = 8;</code>
22061       */
22062      int getMasterKeyId();
22063
22064      // optional uint64 expiryDate = 9;
22065      /**
22066       * <code>optional uint64 expiryDate = 9;</code>
22067       */
22068      boolean hasExpiryDate();
22069      /**
22070       * <code>optional uint64 expiryDate = 9;</code>
22071       */
22072      long getExpiryDate();
22073    }
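
    // Example (illustrative, not protoc output): per the "repeated
    // DelegationKey keys / repeated PersistToken tokens" comment on
    // numTokens, a reader could consume this section as a header followed
    // by counted, length-delimited messages. The InputStream `in` is
    // hypothetical.
    //
    //   SecretManagerSection s = SecretManagerSection.parseDelimitedFrom(in);
    //   for (int i = 0; i < s.getNumKeys(); ++i) {
    //     SecretManagerSection.DelegationKey key =
    //         SecretManagerSection.DelegationKey.parseDelimitedFrom(in);
    //     // use key.getId(), key.getExpiryDate(), key.getKey()
    //   }
    //   for (int i = 0; i < s.getNumTokens(); ++i) {
    //     SecretManagerSection.PersistToken tok =
    //         SecretManagerSection.PersistToken.parseDelimitedFrom(in);
    //     // use tok.getOwner(), tok.getRenewer(), ...
    //   }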
22074    /**
22075     * Protobuf type {@code hadoop.hdfs.fsimage.SecretManagerSection.PersistToken}
22076     */
22077    public static final class PersistToken extends
22078        com.google.protobuf.GeneratedMessage
22079        implements PersistTokenOrBuilder {
22080      // Use PersistToken.newBuilder() to construct.
22081      private PersistToken(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
22082        super(builder);
22083        this.unknownFields = builder.getUnknownFields();
22084      }
22085      private PersistToken(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
22086
22087      private static final PersistToken defaultInstance;
22088      public static PersistToken getDefaultInstance() {
22089        return defaultInstance;
22090      }
22091
22092      public PersistToken getDefaultInstanceForType() {
22093        return defaultInstance;
22094      }
22095
22096      private final com.google.protobuf.UnknownFieldSet unknownFields;
22097      @java.lang.Override
22098      public final com.google.protobuf.UnknownFieldSet
22099          getUnknownFields() {
22100        return this.unknownFields;
22101      }
22102      private PersistToken(
22103          com.google.protobuf.CodedInputStream input,
22104          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
22105          throws com.google.protobuf.InvalidProtocolBufferException {
22106        initFields();
22107        int mutable_bitField0_ = 0;
22108        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
22109            com.google.protobuf.UnknownFieldSet.newBuilder();
22110        try {
22111          boolean done = false;
22112          while (!done) {
22113            int tag = input.readTag();
22114            switch (tag) {
22115              case 0:
22116                done = true;
22117                break;
22118              default: {
22119                if (!parseUnknownField(input, unknownFields,
22120                                       extensionRegistry, tag)) {
22121                  done = true;
22122                }
22123                break;
22124              }
22125              case 8: {
22126                bitField0_ |= 0x00000001;
22127                version_ = input.readUInt32();
22128                break;
22129              }
22130              case 18: {
22131                bitField0_ |= 0x00000002;
22132                owner_ = input.readBytes();
22133                break;
22134              }
22135              case 26: {
22136                bitField0_ |= 0x00000004;
22137                renewer_ = input.readBytes();
22138                break;
22139              }
22140              case 34: {
22141                bitField0_ |= 0x00000008;
22142                realUser_ = input.readBytes();
22143                break;
22144              }
22145              case 40: {
22146                bitField0_ |= 0x00000010;
22147                issueDate_ = input.readUInt64();
22148                break;
22149              }
22150              case 48: {
22151                bitField0_ |= 0x00000020;
22152                maxDate_ = input.readUInt64();
22153                break;
22154              }
22155              case 56: {
22156                bitField0_ |= 0x00000040;
22157                sequenceNumber_ = input.readUInt32();
22158                break;
22159              }
22160              case 64: {
22161                bitField0_ |= 0x00000080;
22162                masterKeyId_ = input.readUInt32();
22163                break;
22164              }
22165              case 72: {
22166                bitField0_ |= 0x00000100;
22167                expiryDate_ = input.readUInt64();
22168                break;
22169              }
22170            }
22171          }
22172        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
22173          throw e.setUnfinishedMessage(this);
22174        } catch (java.io.IOException e) {
22175          throw new com.google.protobuf.InvalidProtocolBufferException(
22176              e.getMessage()).setUnfinishedMessage(this);
22177        } finally {
22178          this.unknownFields = unknownFields.build();
22179          makeExtensionsImmutable();
22180        }
22181      }
22182      public static final com.google.protobuf.Descriptors.Descriptor
22183          getDescriptor() {
22184        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_descriptor;
22185      }
22186
22187      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
22188          internalGetFieldAccessorTable() {
22189        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_fieldAccessorTable
22190            .ensureFieldAccessorsInitialized(
22191                org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken.Builder.class);
22192      }
22193
22194      public static com.google.protobuf.Parser<PersistToken> PARSER =
22195          new com.google.protobuf.AbstractParser<PersistToken>() {
22196        public PersistToken parsePartialFrom(
22197            com.google.protobuf.CodedInputStream input,
22198            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
22199            throws com.google.protobuf.InvalidProtocolBufferException {
22200          return new PersistToken(input, extensionRegistry);
22201        }
22202      };
22203
22204      @java.lang.Override
22205      public com.google.protobuf.Parser<PersistToken> getParserForType() {
22206        return PARSER;
22207      }
22208
22209      private int bitField0_;
22210      // optional uint32 version = 1;
22211      public static final int VERSION_FIELD_NUMBER = 1;
22212      private int version_;
22213      /**
22214       * <code>optional uint32 version = 1;</code>
22215       */
22216      public boolean hasVersion() {
22217        return ((bitField0_ & 0x00000001) == 0x00000001);
22218      }
22219      /**
22220       * <code>optional uint32 version = 1;</code>
22221       */
22222      public int getVersion() {
22223        return version_;
22224      }
22225
22226      // optional string owner = 2;
22227      public static final int OWNER_FIELD_NUMBER = 2;
22228      private java.lang.Object owner_;
22229      /**
22230       * <code>optional string owner = 2;</code>
22231       */
22232      public boolean hasOwner() {
22233        return ((bitField0_ & 0x00000002) == 0x00000002);
22234      }
22235      /**
22236       * <code>optional string owner = 2;</code>
22237       */
22238      public java.lang.String getOwner() {
22239        java.lang.Object ref = owner_;
22240        if (ref instanceof java.lang.String) {
22241          return (java.lang.String) ref;
22242        } else {
22243          com.google.protobuf.ByteString bs = 
22244              (com.google.protobuf.ByteString) ref;
22245          java.lang.String s = bs.toStringUtf8();
22246          if (bs.isValidUtf8()) {
22247            owner_ = s;
22248          }
22249          return s;
22250        }
22251      }
22252      /**
22253       * <code>optional string owner = 2;</code>
22254       */
22255      public com.google.protobuf.ByteString
22256          getOwnerBytes() {
22257        java.lang.Object ref = owner_;
22258        if (ref instanceof java.lang.String) {
22259          com.google.protobuf.ByteString b = 
22260              com.google.protobuf.ByteString.copyFromUtf8(
22261                  (java.lang.String) ref);
22262          owner_ = b;
22263          return b;
22264        } else {
22265          return (com.google.protobuf.ByteString) ref;
22266        }
22267      }
22268
22269      // optional string renewer = 3;
22270      public static final int RENEWER_FIELD_NUMBER = 3;
22271      private java.lang.Object renewer_;
22272      /**
22273       * <code>optional string renewer = 3;</code>
22274       */
22275      public boolean hasRenewer() {
22276        return ((bitField0_ & 0x00000004) == 0x00000004);
22277      }
22278      /**
22279       * <code>optional string renewer = 3;</code>
22280       */
22281      public java.lang.String getRenewer() {
22282        java.lang.Object ref = renewer_;
22283        if (ref instanceof java.lang.String) {
22284          return (java.lang.String) ref;
22285        } else {
22286          com.google.protobuf.ByteString bs = 
22287              (com.google.protobuf.ByteString) ref;
22288          java.lang.String s = bs.toStringUtf8();
22289          if (bs.isValidUtf8()) {
22290            renewer_ = s;
22291          }
22292          return s;
22293        }
22294      }
22295      /**
22296       * <code>optional string renewer = 3;</code>
22297       */
22298      public com.google.protobuf.ByteString
22299          getRenewerBytes() {
22300        java.lang.Object ref = renewer_;
22301        if (ref instanceof java.lang.String) {
22302          com.google.protobuf.ByteString b = 
22303              com.google.protobuf.ByteString.copyFromUtf8(
22304                  (java.lang.String) ref);
22305          renewer_ = b;
22306          return b;
22307        } else {
22308          return (com.google.protobuf.ByteString) ref;
22309        }
22310      }
22311
22312      // optional string realUser = 4;
22313      public static final int REALUSER_FIELD_NUMBER = 4;
22314      private java.lang.Object realUser_;
22315      /**
22316       * <code>optional string realUser = 4;</code>
22317       */
22318      public boolean hasRealUser() {
22319        return ((bitField0_ & 0x00000008) == 0x00000008);
22320      }
22321      /**
22322       * <code>optional string realUser = 4;</code>
22323       */
22324      public java.lang.String getRealUser() {
22325        java.lang.Object ref = realUser_;
22326        if (ref instanceof java.lang.String) {
22327          return (java.lang.String) ref;
22328        } else {
22329          com.google.protobuf.ByteString bs = 
22330              (com.google.protobuf.ByteString) ref;
22331          java.lang.String s = bs.toStringUtf8();
22332          if (bs.isValidUtf8()) {
22333            realUser_ = s;
22334          }
22335          return s;
22336        }
22337      }
22338      /**
22339       * <code>optional string realUser = 4;</code>
22340       */
22341      public com.google.protobuf.ByteString
22342          getRealUserBytes() {
22343        java.lang.Object ref = realUser_;
22344        if (ref instanceof java.lang.String) {
22345          com.google.protobuf.ByteString b = 
22346              com.google.protobuf.ByteString.copyFromUtf8(
22347                  (java.lang.String) ref);
22348          realUser_ = b;
22349          return b;
22350        } else {
22351          return (com.google.protobuf.ByteString) ref;
22352        }
22353      }
22354
22355      // optional uint64 issueDate = 5;
22356      public static final int ISSUEDATE_FIELD_NUMBER = 5;
22357      private long issueDate_;
22358      /**
22359       * <code>optional uint64 issueDate = 5;</code>
22360       */
22361      public boolean hasIssueDate() {
22362        return ((bitField0_ & 0x00000010) == 0x00000010);
22363      }
22364      /**
22365       * <code>optional uint64 issueDate = 5;</code>
22366       */
22367      public long getIssueDate() {
22368        return issueDate_;
22369      }
22370
22371      // optional uint64 maxDate = 6;
22372      public static final int MAXDATE_FIELD_NUMBER = 6;
22373      private long maxDate_;
22374      /**
22375       * <code>optional uint64 maxDate = 6;</code>
22376       */
22377      public boolean hasMaxDate() {
22378        return ((bitField0_ & 0x00000020) == 0x00000020);
22379      }
22380      /**
22381       * <code>optional uint64 maxDate = 6;</code>
22382       */
22383      public long getMaxDate() {
22384        return maxDate_;
22385      }
22386
22387      // optional uint32 sequenceNumber = 7;
22388      public static final int SEQUENCENUMBER_FIELD_NUMBER = 7;
22389      private int sequenceNumber_;
22390      /**
22391       * <code>optional uint32 sequenceNumber = 7;</code>
22392       */
22393      public boolean hasSequenceNumber() {
22394        return ((bitField0_ & 0x00000040) == 0x00000040);
22395      }
22396      /**
22397       * <code>optional uint32 sequenceNumber = 7;</code>
22398       */
22399      public int getSequenceNumber() {
22400        return sequenceNumber_;
22401      }
22402
22403      // optional uint32 masterKeyId = 8;
22404      public static final int MASTERKEYID_FIELD_NUMBER = 8;
22405      private int masterKeyId_;
22406      /**
22407       * <code>optional uint32 masterKeyId = 8;</code>
22408       */
22409      public boolean hasMasterKeyId() {
22410        return ((bitField0_ & 0x00000080) == 0x00000080);
22411      }
22412      /**
22413       * <code>optional uint32 masterKeyId = 8;</code>
22414       */
22415      public int getMasterKeyId() {
22416        return masterKeyId_;
22417      }
22418
22419      // optional uint64 expiryDate = 9;
22420      public static final int EXPIRYDATE_FIELD_NUMBER = 9;
22421      private long expiryDate_;
22422      /**
22423       * <code>optional uint64 expiryDate = 9;</code>
22424       */
22425      public boolean hasExpiryDate() {
22426        return ((bitField0_ & 0x00000100) == 0x00000100);
22427      }
22428      /**
22429       * <code>optional uint64 expiryDate = 9;</code>
22430       */
22431      public long getExpiryDate() {
22432        return expiryDate_;
22433      }
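      // Illustrative note (not generated code): presence for every optional
      // field is tracked in bitField0_, so a reader should consult hasXxx()
      // before trusting getXxx(); unset scalars just report their defaults.
      // A minimal sketch, with a hypothetical `token` variable:
      //
      //   if (token.hasOwner() && token.hasExpiryDate()) {
      //     // expiryDate is a uint64 carried as a Java long; in HDFS it is
      //     // conventionally a millisecond timestamp.
      //     System.out.println(token.getOwner() + " -> " + token.getExpiryDate());
      //   }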
22434
22435      private void initFields() {
22436        version_ = 0;
22437        owner_ = "";
22438        renewer_ = "";
22439        realUser_ = "";
22440        issueDate_ = 0L;
22441        maxDate_ = 0L;
22442        sequenceNumber_ = 0;
22443        masterKeyId_ = 0;
22444        expiryDate_ = 0L;
22445      }
22446      private byte memoizedIsInitialized = -1;
22447      public final boolean isInitialized() {
22448        byte isInitialized = memoizedIsInitialized;
22449        if (isInitialized != -1) return isInitialized == 1;
22450
22451        memoizedIsInitialized = 1;
22452        return true;
22453      }
22454
22455      public void writeTo(com.google.protobuf.CodedOutputStream output)
22456                          throws java.io.IOException {
22457        getSerializedSize();
22458        if (((bitField0_ & 0x00000001) == 0x00000001)) {
22459          output.writeUInt32(1, version_);
22460        }
22461        if (((bitField0_ & 0x00000002) == 0x00000002)) {
22462          output.writeBytes(2, getOwnerBytes());
22463        }
22464        if (((bitField0_ & 0x00000004) == 0x00000004)) {
22465          output.writeBytes(3, getRenewerBytes());
22466        }
22467        if (((bitField0_ & 0x00000008) == 0x00000008)) {
22468          output.writeBytes(4, getRealUserBytes());
22469        }
22470        if (((bitField0_ & 0x00000010) == 0x00000010)) {
22471          output.writeUInt64(5, issueDate_);
22472        }
22473        if (((bitField0_ & 0x00000020) == 0x00000020)) {
22474          output.writeUInt64(6, maxDate_);
22475        }
22476        if (((bitField0_ & 0x00000040) == 0x00000040)) {
22477          output.writeUInt32(7, sequenceNumber_);
22478        }
22479        if (((bitField0_ & 0x00000080) == 0x00000080)) {
22480          output.writeUInt32(8, masterKeyId_);
22481        }
22482        if (((bitField0_ & 0x00000100) == 0x00000100)) {
22483          output.writeUInt64(9, expiryDate_);
22484        }
22485        getUnknownFields().writeTo(output);
22486      }
22487
22488      private int memoizedSerializedSize = -1;
22489      public int getSerializedSize() {
22490        int size = memoizedSerializedSize;
22491        if (size != -1) return size;
22492
22493        size = 0;
22494        if (((bitField0_ & 0x00000001) == 0x00000001)) {
22495          size += com.google.protobuf.CodedOutputStream
22496            .computeUInt32Size(1, version_);
22497        }
22498        if (((bitField0_ & 0x00000002) == 0x00000002)) {
22499          size += com.google.protobuf.CodedOutputStream
22500            .computeBytesSize(2, getOwnerBytes());
22501        }
22502        if (((bitField0_ & 0x00000004) == 0x00000004)) {
22503          size += com.google.protobuf.CodedOutputStream
22504            .computeBytesSize(3, getRenewerBytes());
22505        }
22506        if (((bitField0_ & 0x00000008) == 0x00000008)) {
22507          size += com.google.protobuf.CodedOutputStream
22508            .computeBytesSize(4, getRealUserBytes());
22509        }
22510        if (((bitField0_ & 0x00000010) == 0x00000010)) {
22511          size += com.google.protobuf.CodedOutputStream
22512            .computeUInt64Size(5, issueDate_);
22513        }
22514        if (((bitField0_ & 0x00000020) == 0x00000020)) {
22515          size += com.google.protobuf.CodedOutputStream
22516            .computeUInt64Size(6, maxDate_);
22517        }
22518        if (((bitField0_ & 0x00000040) == 0x00000040)) {
22519          size += com.google.protobuf.CodedOutputStream
22520            .computeUInt32Size(7, sequenceNumber_);
22521        }
22522        if (((bitField0_ & 0x00000080) == 0x00000080)) {
22523          size += com.google.protobuf.CodedOutputStream
22524            .computeUInt32Size(8, masterKeyId_);
22525        }
22526        if (((bitField0_ & 0x00000100) == 0x00000100)) {
22527          size += com.google.protobuf.CodedOutputStream
22528            .computeUInt64Size(9, expiryDate_);
22529        }
22530        size += getUnknownFields().getSerializedSize();
22531        memoizedSerializedSize = size;
22532        return size;
22533      }
22534
22535      private static final long serialVersionUID = 0L;
22536      @java.lang.Override
22537      protected java.lang.Object writeReplace()
22538          throws java.io.ObjectStreamException {
22539        return super.writeReplace();
22540      }
22541
22542      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom(
22543          com.google.protobuf.ByteString data)
22544          throws com.google.protobuf.InvalidProtocolBufferException {
22545        return PARSER.parseFrom(data);
22546      }
22547      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom(
22548          com.google.protobuf.ByteString data,
22549          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
22550          throws com.google.protobuf.InvalidProtocolBufferException {
22551        return PARSER.parseFrom(data, extensionRegistry);
22552      }
22553      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom(byte[] data)
22554          throws com.google.protobuf.InvalidProtocolBufferException {
22555        return PARSER.parseFrom(data);
22556      }
22557      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom(
22558          byte[] data,
22559          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
22560          throws com.google.protobuf.InvalidProtocolBufferException {
22561        return PARSER.parseFrom(data, extensionRegistry);
22562      }
22563      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom(java.io.InputStream input)
22564          throws java.io.IOException {
22565        return PARSER.parseFrom(input);
22566      }
22567      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom(
22568          java.io.InputStream input,
22569          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
22570          throws java.io.IOException {
22571        return PARSER.parseFrom(input, extensionRegistry);
22572      }
22573      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseDelimitedFrom(java.io.InputStream input)
22574          throws java.io.IOException {
22575        return PARSER.parseDelimitedFrom(input);
22576      }
22577      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseDelimitedFrom(
22578          java.io.InputStream input,
22579          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
22580          throws java.io.IOException {
22581        return PARSER.parseDelimitedFrom(input, extensionRegistry);
22582      }
22583      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom(
22584          com.google.protobuf.CodedInputStream input)
22585          throws java.io.IOException {
22586        return PARSER.parseFrom(input);
22587      }
22588      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom(
22589          com.google.protobuf.CodedInputStream input,
22590          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
22591          throws java.io.IOException {
22592        return PARSER.parseFrom(input, extensionRegistry);
22593      }
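      // Illustrative note (not generated code): all of the parseFrom
      // overloads above delegate to PARSER. A minimal decode, assuming a
      // hypothetical byte[] named `data`:
      //
      //   SecretManagerSection.PersistToken token =
      //       SecretManagerSection.PersistToken.parseFrom(data);
      //
      // The parseDelimitedFrom variants additionally expect a varint length
      // prefix before the message bytes, the framing used for messages that
      // trail a section in the image file.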
22594
22595      public static Builder newBuilder() { return Builder.create(); }
22596      public Builder newBuilderForType() { return newBuilder(); }
22597      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken prototype) {
22598        return newBuilder().mergeFrom(prototype);
22599      }
22600      public Builder toBuilder() { return newBuilder(this); }
22601
22602      @java.lang.Override
22603      protected Builder newBuilderForType(
22604          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
22605        Builder builder = new Builder(parent);
22606        return builder;
22607      }
22608      /**
22609       * Protobuf type {@code hadoop.hdfs.fsimage.SecretManagerSection.PersistToken}
22610       */
22611      public static final class Builder extends
22612          com.google.protobuf.GeneratedMessage.Builder<Builder>
22613         implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistTokenOrBuilder {
22614        public static final com.google.protobuf.Descriptors.Descriptor
22615            getDescriptor() {
22616          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_descriptor;
22617        }
22618
22619        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
22620            internalGetFieldAccessorTable() {
22621          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_fieldAccessorTable
22622              .ensureFieldAccessorsInitialized(
22623                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken.Builder.class);
22624        }
22625
22626        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken.newBuilder()
22627        private Builder() {
22628          maybeForceBuilderInitialization();
22629        }
22630
22631        private Builder(
22632            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
22633          super(parent);
22634          maybeForceBuilderInitialization();
22635        }
22636        private void maybeForceBuilderInitialization() {
22637          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
22638          }
22639        }
22640        private static Builder create() {
22641          return new Builder();
22642        }
22643
22644        public Builder clear() {
22645          super.clear();
22646          version_ = 0;
22647          bitField0_ = (bitField0_ & ~0x00000001);
22648          owner_ = "";
22649          bitField0_ = (bitField0_ & ~0x00000002);
22650          renewer_ = "";
22651          bitField0_ = (bitField0_ & ~0x00000004);
22652          realUser_ = "";
22653          bitField0_ = (bitField0_ & ~0x00000008);
22654          issueDate_ = 0L;
22655          bitField0_ = (bitField0_ & ~0x00000010);
22656          maxDate_ = 0L;
22657          bitField0_ = (bitField0_ & ~0x00000020);
22658          sequenceNumber_ = 0;
22659          bitField0_ = (bitField0_ & ~0x00000040);
22660          masterKeyId_ = 0;
22661          bitField0_ = (bitField0_ & ~0x00000080);
22662          expiryDate_ = 0L;
22663          bitField0_ = (bitField0_ & ~0x00000100);
22664          return this;
22665        }
22666
22667        public Builder clone() {
22668          return create().mergeFrom(buildPartial());
22669        }
22670
22671        public com.google.protobuf.Descriptors.Descriptor
22672            getDescriptorForType() {
22673          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_descriptor;
22674        }
22675
22676        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken getDefaultInstanceForType() {
22677          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken.getDefaultInstance();
22678        }
22679
22680        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken build() {
22681          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken result = buildPartial();
22682          if (!result.isInitialized()) {
22683            throw newUninitializedMessageException(result);
22684          }
22685          return result;
22686        }
22687
22688        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken buildPartial() {
22689          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken(this);
22690          int from_bitField0_ = bitField0_;
22691          int to_bitField0_ = 0;
22692          if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
22693            to_bitField0_ |= 0x00000001;
22694          }
22695          result.version_ = version_;
22696          if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
22697            to_bitField0_ |= 0x00000002;
22698          }
22699          result.owner_ = owner_;
22700          if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
22701            to_bitField0_ |= 0x00000004;
22702          }
22703          result.renewer_ = renewer_;
22704          if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
22705            to_bitField0_ |= 0x00000008;
22706          }
22707          result.realUser_ = realUser_;
22708          if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
22709            to_bitField0_ |= 0x00000010;
22710          }
22711          result.issueDate_ = issueDate_;
22712          if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
22713            to_bitField0_ |= 0x00000020;
22714          }
22715          result.maxDate_ = maxDate_;
22716          if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
22717            to_bitField0_ |= 0x00000040;
22718          }
22719          result.sequenceNumber_ = sequenceNumber_;
22720          if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
22721            to_bitField0_ |= 0x00000080;
22722          }
22723          result.masterKeyId_ = masterKeyId_;
22724          if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
22725            to_bitField0_ |= 0x00000100;
22726          }
22727          result.expiryDate_ = expiryDate_;
22728          result.bitField0_ = to_bitField0_;
22729          onBuilt();
22730          return result;
22731        }
22732
22733        public Builder mergeFrom(com.google.protobuf.Message other) {
22734          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken) {
22735            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken)other);
22736          } else {
22737            super.mergeFrom(other);
22738            return this;
22739          }
22740        }
22741
22742        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken other) {
22743          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken.getDefaultInstance()) return this;
22744          if (other.hasVersion()) {
22745            setVersion(other.getVersion());
22746          }
22747          if (other.hasOwner()) {
22748            bitField0_ |= 0x00000002;
22749            owner_ = other.owner_;
22750            onChanged();
22751          }
22752          if (other.hasRenewer()) {
22753            bitField0_ |= 0x00000004;
22754            renewer_ = other.renewer_;
22755            onChanged();
22756          }
22757          if (other.hasRealUser()) {
22758            bitField0_ |= 0x00000008;
22759            realUser_ = other.realUser_;
22760            onChanged();
22761          }
22762          if (other.hasIssueDate()) {
22763            setIssueDate(other.getIssueDate());
22764          }
22765          if (other.hasMaxDate()) {
22766            setMaxDate(other.getMaxDate());
22767          }
22768          if (other.hasSequenceNumber()) {
22769            setSequenceNumber(other.getSequenceNumber());
22770          }
22771          if (other.hasMasterKeyId()) {
22772            setMasterKeyId(other.getMasterKeyId());
22773          }
22774          if (other.hasExpiryDate()) {
22775            setExpiryDate(other.getExpiryDate());
22776          }
22777          this.mergeUnknownFields(other.getUnknownFields());
22778          return this;
22779        }
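        // Note (not generated code): mergeFrom copies only the fields whose
        // presence bits are set on `other`; for the scalar and string fields
        // of PersistToken the incoming value simply overwrites whatever the
        // builder already held, the usual protobuf merge rule for
        // non-repeated fields.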
22780
22781        public final boolean isInitialized() {
22782          return true;
22783        }
22784
22785        public Builder mergeFrom(
22786            com.google.protobuf.CodedInputStream input,
22787            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
22788            throws java.io.IOException {
22789          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parsedMessage = null;
22790          try {
22791            parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
22792          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
22793            parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken) e.getUnfinishedMessage();
22794            throw e;
22795          } finally {
22796            if (parsedMessage != null) {
22797              mergeFrom(parsedMessage);
22798            }
22799          }
22800          return this;
22801        }
22802        private int bitField0_;
22803
22804        // optional uint32 version = 1;
22805        private int version_ ;
22806        /**
22807         * <code>optional uint32 version = 1;</code>
22808         */
22809        public boolean hasVersion() {
22810          return ((bitField0_ & 0x00000001) == 0x00000001);
22811        }
22812        /**
22813         * <code>optional uint32 version = 1;</code>
22814         */
22815        public int getVersion() {
22816          return version_;
22817        }
22818        /**
22819         * <code>optional uint32 version = 1;</code>
22820         */
22821        public Builder setVersion(int value) {
22822          bitField0_ |= 0x00000001;
22823          version_ = value;
22824          onChanged();
22825          return this;
22826        }
22827        /**
22828         * <code>optional uint32 version = 1;</code>
22829         */
22830        public Builder clearVersion() {
22831          bitField0_ = (bitField0_ & ~0x00000001);
22832          version_ = 0;
22833          onChanged();
22834          return this;
22835        }
22836
22837        // optional string owner = 2;
22838        private java.lang.Object owner_ = "";
22839        /**
22840         * <code>optional string owner = 2;</code>
22841         */
22842        public boolean hasOwner() {
22843          return ((bitField0_ & 0x00000002) == 0x00000002);
22844        }
22845        /**
22846         * <code>optional string owner = 2;</code>
22847         */
22848        public java.lang.String getOwner() {
22849          java.lang.Object ref = owner_;
22850          if (!(ref instanceof java.lang.String)) {
22851            java.lang.String s = ((com.google.protobuf.ByteString) ref)
22852                .toStringUtf8();
22853            owner_ = s;
22854            return s;
22855          } else {
22856            return (java.lang.String) ref;
22857          }
22858        }
22859        /**
22860         * <code>optional string owner = 2;</code>
22861         */
22862        public com.google.protobuf.ByteString
22863            getOwnerBytes() {
22864          java.lang.Object ref = owner_;
22865          if (ref instanceof java.lang.String) {
22866            com.google.protobuf.ByteString b =
22867                com.google.protobuf.ByteString.copyFromUtf8(
22868                    (java.lang.String) ref);
22869            owner_ = b;
22870            return b;
22871          } else {
22872            return (com.google.protobuf.ByteString) ref;
22873          }
22874        }
22875        /**
22876         * <code>optional string owner = 2;</code>
22877         */
22878        public Builder setOwner(
22879            java.lang.String value) {
22880          if (value == null) {
22881            throw new NullPointerException();
22882          }
22883          bitField0_ |= 0x00000002;
22884          owner_ = value;
22885          onChanged();
22886          return this;
22887        }
22888        /**
22889         * <code>optional string owner = 2;</code>
22890         */
22891        public Builder clearOwner() {
22892          bitField0_ = (bitField0_ & ~0x00000002);
22893          owner_ = getDefaultInstance().getOwner();
22894          onChanged();
22895          return this;
22896        }
22897        /**
22898         * <code>optional string owner = 2;</code>
22899         */
22900        public Builder setOwnerBytes(
22901            com.google.protobuf.ByteString value) {
22902          if (value == null) {
22903            throw new NullPointerException();
22904          }
22905          bitField0_ |= 0x00000002;
22906          owner_ = value;
22907          onChanged();
22908          return this;
22909        }
22910
22911        // optional string renewer = 3;
22912        private java.lang.Object renewer_ = "";
22913        /**
22914         * <code>optional string renewer = 3;</code>
22915         */
22916        public boolean hasRenewer() {
22917          return ((bitField0_ & 0x00000004) == 0x00000004);
22918        }
22919        /**
22920         * <code>optional string renewer = 3;</code>
22921         */
22922        public java.lang.String getRenewer() {
22923          java.lang.Object ref = renewer_;
22924          if (!(ref instanceof java.lang.String)) {
22925            java.lang.String s = ((com.google.protobuf.ByteString) ref)
22926                .toStringUtf8();
22927            renewer_ = s;
22928            return s;
22929          } else {
22930            return (java.lang.String) ref;
22931          }
22932        }
22933        /**
22934         * <code>optional string renewer = 3;</code>
22935         */
22936        public com.google.protobuf.ByteString
22937            getRenewerBytes() {
22938          java.lang.Object ref = renewer_;
22939          if (ref instanceof java.lang.String) {
22940            com.google.protobuf.ByteString b =
22941                com.google.protobuf.ByteString.copyFromUtf8(
22942                    (java.lang.String) ref);
22943            renewer_ = b;
22944            return b;
22945          } else {
22946            return (com.google.protobuf.ByteString) ref;
22947          }
22948        }
22949        /**
22950         * <code>optional string renewer = 3;</code>
22951         */
22952        public Builder setRenewer(
22953            java.lang.String value) {
22954          if (value == null) {
22955            throw new NullPointerException();
22956          }
22957          bitField0_ |= 0x00000004;
22958          renewer_ = value;
22959          onChanged();
22960          return this;
22961        }
22962        /**
22963         * <code>optional string renewer = 3;</code>
22964         */
22965        public Builder clearRenewer() {
22966          bitField0_ = (bitField0_ & ~0x00000004);
22967          renewer_ = getDefaultInstance().getRenewer();
22968          onChanged();
22969          return this;
22970        }
22971        /**
22972         * <code>optional string renewer = 3;</code>
22973         */
22974        public Builder setRenewerBytes(
22975            com.google.protobuf.ByteString value) {
22976          if (value == null) {
22977            throw new NullPointerException();
22978          }
22979          bitField0_ |= 0x00000004;
22980          renewer_ = value;
22981          onChanged();
22982          return this;
22983        }
22984
22985        // optional string realUser = 4;
22986        private java.lang.Object realUser_ = "";
22987        /**
22988         * <code>optional string realUser = 4;</code>
22989         */
22990        public boolean hasRealUser() {
22991          return ((bitField0_ & 0x00000008) == 0x00000008);
22992        }
22993        /**
22994         * <code>optional string realUser = 4;</code>
22995         */
22996        public java.lang.String getRealUser() {
22997          java.lang.Object ref = realUser_;
22998          if (!(ref instanceof java.lang.String)) {
22999            java.lang.String s = ((com.google.protobuf.ByteString) ref)
23000                .toStringUtf8();
23001            realUser_ = s;
23002            return s;
23003          } else {
23004            return (java.lang.String) ref;
23005          }
23006        }
23007        /**
23008         * <code>optional string realUser = 4;</code>
23009         */
23010        public com.google.protobuf.ByteString
23011            getRealUserBytes() {
23012          java.lang.Object ref = realUser_;
23013          if (ref instanceof java.lang.String) {
23014            com.google.protobuf.ByteString b =
23015                com.google.protobuf.ByteString.copyFromUtf8(
23016                    (java.lang.String) ref);
23017            realUser_ = b;
23018            return b;
23019          } else {
23020            return (com.google.protobuf.ByteString) ref;
23021          }
23022        }
23023        /**
23024         * <code>optional string realUser = 4;</code>
23025         */
23026        public Builder setRealUser(
23027            java.lang.String value) {
23028          if (value == null) {
23029            throw new NullPointerException();
23030          }
23031          bitField0_ |= 0x00000008;
23032          realUser_ = value;
23033          onChanged();
23034          return this;
23035        }
23036        /**
23037         * <code>optional string realUser = 4;</code>
23038         */
23039        public Builder clearRealUser() {
23040          bitField0_ = (bitField0_ & ~0x00000008);
23041          realUser_ = getDefaultInstance().getRealUser();
23042          onChanged();
23043          return this;
23044        }
23045        /**
23046         * <code>optional string realUser = 4;</code>
23047         */
23048        public Builder setRealUserBytes(
23049            com.google.protobuf.ByteString value) {
23050          if (value == null) {
23051            throw new NullPointerException();
23052          }
23053          bitField0_ |= 0x00000008;
23054          realUser_ = value;
23055          onChanged();
23056          return this;
23057        }
23058
23059        // optional uint64 issueDate = 5;
23060        private long issueDate_ ;
23061        /**
23062         * <code>optional uint64 issueDate = 5;</code>
23063         */
23064        public boolean hasIssueDate() {
23065          return ((bitField0_ & 0x00000010) == 0x00000010);
23066        }
23067        /**
23068         * <code>optional uint64 issueDate = 5;</code>
23069         */
23070        public long getIssueDate() {
23071          return issueDate_;
23072        }
23073        /**
23074         * <code>optional uint64 issueDate = 5;</code>
23075         */
23076        public Builder setIssueDate(long value) {
23077          bitField0_ |= 0x00000010;
23078          issueDate_ = value;
23079          onChanged();
23080          return this;
23081        }
23082        /**
23083         * <code>optional uint64 issueDate = 5;</code>
23084         */
23085        public Builder clearIssueDate() {
23086          bitField0_ = (bitField0_ & ~0x00000010);
23087          issueDate_ = 0L;
23088          onChanged();
23089          return this;
23090        }
23091
23092        // optional uint64 maxDate = 6;
23093        private long maxDate_ ;
23094        /**
23095         * <code>optional uint64 maxDate = 6;</code>
23096         */
23097        public boolean hasMaxDate() {
23098          return ((bitField0_ & 0x00000020) == 0x00000020);
23099        }
23100        /**
23101         * <code>optional uint64 maxDate = 6;</code>
23102         */
23103        public long getMaxDate() {
23104          return maxDate_;
23105        }
23106        /**
23107         * <code>optional uint64 maxDate = 6;</code>
23108         */
23109        public Builder setMaxDate(long value) {
23110          bitField0_ |= 0x00000020;
23111          maxDate_ = value;
23112          onChanged();
23113          return this;
23114        }
23115        /**
23116         * <code>optional uint64 maxDate = 6;</code>
23117         */
23118        public Builder clearMaxDate() {
23119          bitField0_ = (bitField0_ & ~0x00000020);
23120          maxDate_ = 0L;
23121          onChanged();
23122          return this;
23123        }
23124
23125        // optional uint32 sequenceNumber = 7;
23126        private int sequenceNumber_ ;
23127        /**
23128         * <code>optional uint32 sequenceNumber = 7;</code>
23129         */
23130        public boolean hasSequenceNumber() {
23131          return ((bitField0_ & 0x00000040) == 0x00000040);
23132        }
23133        /**
23134         * <code>optional uint32 sequenceNumber = 7;</code>
23135         */
23136        public int getSequenceNumber() {
23137          return sequenceNumber_;
23138        }
23139        /**
23140         * <code>optional uint32 sequenceNumber = 7;</code>
23141         */
23142        public Builder setSequenceNumber(int value) {
23143          bitField0_ |= 0x00000040;
23144          sequenceNumber_ = value;
23145          onChanged();
23146          return this;
23147        }
23148        /**
23149         * <code>optional uint32 sequenceNumber = 7;</code>
23150         */
23151        public Builder clearSequenceNumber() {
23152          bitField0_ = (bitField0_ & ~0x00000040);
23153          sequenceNumber_ = 0;
23154          onChanged();
23155          return this;
23156        }
23157
23158        // optional uint32 masterKeyId = 8;
23159        private int masterKeyId_ ;
23160        /**
23161         * <code>optional uint32 masterKeyId = 8;</code>
23162         */
23163        public boolean hasMasterKeyId() {
23164          return ((bitField0_ & 0x00000080) == 0x00000080);
23165        }
23166        /**
23167         * <code>optional uint32 masterKeyId = 8;</code>
23168         */
23169        public int getMasterKeyId() {
23170          return masterKeyId_;
23171        }
23172        /**
23173         * <code>optional uint32 masterKeyId = 8;</code>
23174         */
23175        public Builder setMasterKeyId(int value) {
23176          bitField0_ |= 0x00000080;
23177          masterKeyId_ = value;
23178          onChanged();
23179          return this;
23180        }
23181        /**
23182         * <code>optional uint32 masterKeyId = 8;</code>
23183         */
23184        public Builder clearMasterKeyId() {
23185          bitField0_ = (bitField0_ & ~0x00000080);
23186          masterKeyId_ = 0;
23187          onChanged();
23188          return this;
23189        }
23190
23191        // optional uint64 expiryDate = 9;
23192        private long expiryDate_ ;
23193        /**
23194         * <code>optional uint64 expiryDate = 9;</code>
23195         */
23196        public boolean hasExpiryDate() {
23197          return ((bitField0_ & 0x00000100) == 0x00000100);
23198        }
23199        /**
23200         * <code>optional uint64 expiryDate = 9;</code>
23201         */
23202        public long getExpiryDate() {
23203          return expiryDate_;
23204        }
23205        /**
23206         * <code>optional uint64 expiryDate = 9;</code>
23207         */
23208        public Builder setExpiryDate(long value) {
23209          bitField0_ |= 0x00000100;
23210          expiryDate_ = value;
23211          onChanged();
23212          return this;
23213        }
23214        /**
23215         * <code>optional uint64 expiryDate = 9;</code>
23216         */
23217        public Builder clearExpiryDate() {
23218          bitField0_ = (bitField0_ & ~0x00000100);
23219          expiryDate_ = 0L;
23220          onChanged();
23221          return this;
23222        }
23223
23224        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SecretManagerSection.PersistToken)
23225      }
23226
23227      static {
23228        defaultInstance = new PersistToken(true);
23229        defaultInstance.initFields();
23230      }
23231
23232      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SecretManagerSection.PersistToken)
23233    }
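    // Illustrative sketch (not generated code): building a PersistToken via
    // its Builder; every value below is hypothetical.
    //
    //   SecretManagerSection.PersistToken token =
    //       SecretManagerSection.PersistToken.newBuilder()
    //           .setOwner("alice")
    //           .setRenewer("yarn")
    //           .setIssueDate(issueMillis)   // uint64 carried as Java long
    //           .setMaxDate(maxMillis)
    //           .setSequenceNumber(42)
    //           .setMasterKeyId(7)
    //           .setExpiryDate(expiryMillis)
    //           .build();
    //
    //   // toBuilder() clones an existing message for modification:
    //   SecretManagerSection.PersistToken renewed =
    //       token.toBuilder().setExpiryDate(newExpiryMillis).build();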
23234
23235    private int bitField0_;
23236    // optional uint32 currentId = 1;
23237    public static final int CURRENTID_FIELD_NUMBER = 1;
23238    private int currentId_;
23239    /**
23240     * <code>optional uint32 currentId = 1;</code>
23241     */
23242    public boolean hasCurrentId() {
23243      return ((bitField0_ & 0x00000001) == 0x00000001);
23244    }
23245    /**
23246     * <code>optional uint32 currentId = 1;</code>
23247     */
23248    public int getCurrentId() {
23249      return currentId_;
23250    }
23251
23252    // optional uint32 tokenSequenceNumber = 2;
23253    public static final int TOKENSEQUENCENUMBER_FIELD_NUMBER = 2;
23254    private int tokenSequenceNumber_;
23255    /**
23256     * <code>optional uint32 tokenSequenceNumber = 2;</code>
23257     */
23258    public boolean hasTokenSequenceNumber() {
23259      return ((bitField0_ & 0x00000002) == 0x00000002);
23260    }
23261    /**
23262     * <code>optional uint32 tokenSequenceNumber = 2;</code>
23263     */
23264    public int getTokenSequenceNumber() {
23265      return tokenSequenceNumber_;
23266    }
23267
23268    // optional uint32 numKeys = 3;
23269    public static final int NUMKEYS_FIELD_NUMBER = 3;
23270    private int numKeys_;
23271    /**
23272     * <code>optional uint32 numKeys = 3;</code>
23273     */
23274    public boolean hasNumKeys() {
23275      return ((bitField0_ & 0x00000004) == 0x00000004);
23276    }
23277    /**
23278     * <code>optional uint32 numKeys = 3;</code>
23279     */
23280    public int getNumKeys() {
23281      return numKeys_;
23282    }
23283
23284    // optional uint32 numTokens = 4;
23285    public static final int NUMTOKENS_FIELD_NUMBER = 4;
23286    private int numTokens_;
23287    /**
23288     * <code>optional uint32 numTokens = 4;</code>
23289     *
23290     * <pre>
23291     * repeated DelegationKey keys
23292     * repeated PersistToken tokens
23293     * </pre>
23294     */
23295    public boolean hasNumTokens() {
23296      return ((bitField0_ & 0x00000008) == 0x00000008);
23297    }
23298    /**
23299     * <code>optional uint32 numTokens = 4;</code>
23300     *
23301     * <pre>
23302     * repeated DelegationKey keys
23303     * repeated PersistToken tokens
23304     * </pre>
23305     */
23306    public int getNumTokens() {
23307      return numTokens_;
23308    }
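    // Note (not generated code): numKeys and numTokens are counts only. As
    // the <pre> comment above indicates, the DelegationKey and PersistToken
    // messages are not embedded fields; they follow this section in the
    // stream. A loader might read them back roughly like this, with `in`
    // being a hypothetical InputStream positioned at the section:
    //
    //   SecretManagerSection s = SecretManagerSection.parseDelimitedFrom(in);
    //   for (int i = 0; i < s.getNumKeys(); i++) {
    //     SecretManagerSection.DelegationKey k =
    //         SecretManagerSection.DelegationKey.parseDelimitedFrom(in);
    //   }
    //   for (int i = 0; i < s.getNumTokens(); i++) {
    //     SecretManagerSection.PersistToken t =
    //         SecretManagerSection.PersistToken.parseDelimitedFrom(in);
    //   }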
23309
23310    private void initFields() {
23311      currentId_ = 0;
23312      tokenSequenceNumber_ = 0;
23313      numKeys_ = 0;
23314      numTokens_ = 0;
23315    }
23316    private byte memoizedIsInitialized = -1;
23317    public final boolean isInitialized() {
23318      byte isInitialized = memoizedIsInitialized;
23319      if (isInitialized != -1) return isInitialized == 1;
23320
23321      memoizedIsInitialized = 1;
23322      return true;
23323    }
23324
23325    public void writeTo(com.google.protobuf.CodedOutputStream output)
23326                        throws java.io.IOException {
23327      getSerializedSize();
23328      if (((bitField0_ & 0x00000001) == 0x00000001)) {
23329        output.writeUInt32(1, currentId_);
23330      }
23331      if (((bitField0_ & 0x00000002) == 0x00000002)) {
23332        output.writeUInt32(2, tokenSequenceNumber_);
23333      }
23334      if (((bitField0_ & 0x00000004) == 0x00000004)) {
23335        output.writeUInt32(3, numKeys_);
23336      }
23337      if (((bitField0_ & 0x00000008) == 0x00000008)) {
23338        output.writeUInt32(4, numTokens_);
23339      }
23340      getUnknownFields().writeTo(output);
23341    }
23342
23343    private int memoizedSerializedSize = -1;
23344    public int getSerializedSize() {
23345      int size = memoizedSerializedSize;
23346      if (size != -1) return size;
23347
23348      size = 0;
23349      if (((bitField0_ & 0x00000001) == 0x00000001)) {
23350        size += com.google.protobuf.CodedOutputStream
23351          .computeUInt32Size(1, currentId_);
23352      }
23353      if (((bitField0_ & 0x00000002) == 0x00000002)) {
23354        size += com.google.protobuf.CodedOutputStream
23355          .computeUInt32Size(2, tokenSequenceNumber_);
23356      }
23357      if (((bitField0_ & 0x00000004) == 0x00000004)) {
23358        size += com.google.protobuf.CodedOutputStream
23359          .computeUInt32Size(3, numKeys_);
23360      }
23361      if (((bitField0_ & 0x00000008) == 0x00000008)) {
23362        size += com.google.protobuf.CodedOutputStream
23363          .computeUInt32Size(4, numTokens_);
23364      }
23365      size += getUnknownFields().getSerializedSize();
23366      memoizedSerializedSize = size;
23367      return size;
23368    }
23369
23370    private static final long serialVersionUID = 0L;
23371    @java.lang.Override
23372    protected java.lang.Object writeReplace()
23373        throws java.io.ObjectStreamException {
23374      return super.writeReplace();
23375    }
23376
23377    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom(
23378        com.google.protobuf.ByteString data)
23379        throws com.google.protobuf.InvalidProtocolBufferException {
23380      return PARSER.parseFrom(data);
23381    }
23382    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom(
23383        com.google.protobuf.ByteString data,
23384        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
23385        throws com.google.protobuf.InvalidProtocolBufferException {
23386      return PARSER.parseFrom(data, extensionRegistry);
23387    }
23388    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom(byte[] data)
23389        throws com.google.protobuf.InvalidProtocolBufferException {
23390      return PARSER.parseFrom(data);
23391    }
23392    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom(
23393        byte[] data,
23394        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
23395        throws com.google.protobuf.InvalidProtocolBufferException {
23396      return PARSER.parseFrom(data, extensionRegistry);
23397    }
23398    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom(java.io.InputStream input)
23399        throws java.io.IOException {
23400      return PARSER.parseFrom(input);
23401    }
23402    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom(
23403        java.io.InputStream input,
23404        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
23405        throws java.io.IOException {
23406      return PARSER.parseFrom(input, extensionRegistry);
23407    }
23408    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseDelimitedFrom(java.io.InputStream input)
23409        throws java.io.IOException {
23410      return PARSER.parseDelimitedFrom(input);
23411    }
23412    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseDelimitedFrom(
23413        java.io.InputStream input,
23414        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
23415        throws java.io.IOException {
23416      return PARSER.parseDelimitedFrom(input, extensionRegistry);
23417    }
23418    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom(
23419        com.google.protobuf.CodedInputStream input)
23420        throws java.io.IOException {
23421      return PARSER.parseFrom(input);
23422    }
23423    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom(
23424        com.google.protobuf.CodedInputStream input,
23425        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
23426        throws java.io.IOException {
23427      return PARSER.parseFrom(input, extensionRegistry);
23428    }
23429
23430    public static Builder newBuilder() { return Builder.create(); }
23431    public Builder newBuilderForType() { return newBuilder(); }
23432    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection prototype) {
23433      return newBuilder().mergeFrom(prototype);
23434    }
23435    public Builder toBuilder() { return newBuilder(this); }
23436
23437    @java.lang.Override
23438    protected Builder newBuilderForType(
23439        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
23440      Builder builder = new Builder(parent);
23441      return builder;
23442    }
23443    /**
23444     * Protobuf type {@code hadoop.hdfs.fsimage.SecretManagerSection}
23445     */
23446    public static final class Builder extends
23447        com.google.protobuf.GeneratedMessage.Builder<Builder>
23448       implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSectionOrBuilder {
23449      public static final com.google.protobuf.Descriptors.Descriptor
23450          getDescriptor() {
23451        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor;
23452      }
23453
23454      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
23455          internalGetFieldAccessorTable() {
23456        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_fieldAccessorTable
23457            .ensureFieldAccessorsInitialized(
23458                org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.Builder.class);
23459      }
23460
23461      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.newBuilder()
23462      private Builder() {
23463        maybeForceBuilderInitialization();
23464      }
23465
23466      private Builder(
23467          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
23468        super(parent);
23469        maybeForceBuilderInitialization();
23470      }
23471      private void maybeForceBuilderInitialization() {
23472        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
23473        }
23474      }
23475      private static Builder create() {
23476        return new Builder();
23477      }
23478
23479      public Builder clear() {
23480        super.clear();
23481        currentId_ = 0;
23482        bitField0_ = (bitField0_ & ~0x00000001);
23483        tokenSequenceNumber_ = 0;
23484        bitField0_ = (bitField0_ & ~0x00000002);
23485        numKeys_ = 0;
23486        bitField0_ = (bitField0_ & ~0x00000004);
23487        numTokens_ = 0;
23488        bitField0_ = (bitField0_ & ~0x00000008);
23489        return this;
23490      }
23491
23492      public Builder clone() {
23493        return create().mergeFrom(buildPartial());
23494      }
23495
23496      public com.google.protobuf.Descriptors.Descriptor
23497          getDescriptorForType() {
23498        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor;
23499      }
23500
23501      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection getDefaultInstanceForType() {
23502        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.getDefaultInstance();
23503      }
23504
23505      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection build() {
23506        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection result = buildPartial();
23507        if (!result.isInitialized()) {
23508          throw newUninitializedMessageException(result);
23509        }
23510        return result;
23511      }
23512
23513      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection buildPartial() {
23514        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection(this);
23515        int from_bitField0_ = bitField0_;
23516        int to_bitField0_ = 0;
23517        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
23518          to_bitField0_ |= 0x00000001;
23519        }
23520        result.currentId_ = currentId_;
23521        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
23522          to_bitField0_ |= 0x00000002;
23523        }
23524        result.tokenSequenceNumber_ = tokenSequenceNumber_;
23525        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
23526          to_bitField0_ |= 0x00000004;
23527        }
23528        result.numKeys_ = numKeys_;
23529        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
23530          to_bitField0_ |= 0x00000008;
23531        }
23532        result.numTokens_ = numTokens_;
23533        result.bitField0_ = to_bitField0_;
23534        onBuilt();
23535        return result;
23536      }
23537
23538      public Builder mergeFrom(com.google.protobuf.Message other) {
23539        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection) {
23540          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection)other);
23541        } else {
23542          super.mergeFrom(other);
23543          return this;
23544        }
23545      }
23546
23547      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection other) {
23548        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.getDefaultInstance()) return this;
23549        if (other.hasCurrentId()) {
23550          setCurrentId(other.getCurrentId());
23551        }
23552        if (other.hasTokenSequenceNumber()) {
23553          setTokenSequenceNumber(other.getTokenSequenceNumber());
23554        }
23555        if (other.hasNumKeys()) {
23556          setNumKeys(other.getNumKeys());
23557        }
23558        if (other.hasNumTokens()) {
23559          setNumTokens(other.getNumTokens());
23560        }
23561        this.mergeUnknownFields(other.getUnknownFields());
23562        return this;
23563      }
23564
23565      public final boolean isInitialized() {
23566        return true;
23567      }
23568
23569      public Builder mergeFrom(
23570          com.google.protobuf.CodedInputStream input,
23571          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
23572          throws java.io.IOException {
23573        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parsedMessage = null;
23574        try {
23575          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
23576        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
23577          parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection) e.getUnfinishedMessage();
23578          throw e;
23579        } finally {
23580          if (parsedMessage != null) {
23581            mergeFrom(parsedMessage);
23582          }
23583        }
23584        return this;
23585      }
23586      private int bitField0_;
23587
23588      // optional uint32 currentId = 1;
23589      private int currentId_ ;
23590      /**
23591       * <code>optional uint32 currentId = 1;</code>
23592       */
23593      public boolean hasCurrentId() {
23594        return ((bitField0_ & 0x00000001) == 0x00000001);
23595      }
23596      /**
23597       * <code>optional uint32 currentId = 1;</code>
23598       */
23599      public int getCurrentId() {
23600        return currentId_;
23601      }
23602      /**
23603       * <code>optional uint32 currentId = 1;</code>
23604       */
23605      public Builder setCurrentId(int value) {
23606        bitField0_ |= 0x00000001;
23607        currentId_ = value;
23608        onChanged();
23609        return this;
23610      }
23611      /**
23612       * <code>optional uint32 currentId = 1;</code>
23613       */
      public Builder clearCurrentId() {
        bitField0_ = (bitField0_ & ~0x00000001);
        currentId_ = 0;
        onChanged();
        return this;
      }

      // optional uint32 tokenSequenceNumber = 2;
      private int tokenSequenceNumber_ ;
      /**
       * <code>optional uint32 tokenSequenceNumber = 2;</code>
       */
      public boolean hasTokenSequenceNumber() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>optional uint32 tokenSequenceNumber = 2;</code>
       */
      public int getTokenSequenceNumber() {
        return tokenSequenceNumber_;
      }
      /**
       * <code>optional uint32 tokenSequenceNumber = 2;</code>
       */
      public Builder setTokenSequenceNumber(int value) {
        bitField0_ |= 0x00000002;
        tokenSequenceNumber_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint32 tokenSequenceNumber = 2;</code>
       */
      public Builder clearTokenSequenceNumber() {
        bitField0_ = (bitField0_ & ~0x00000002);
        tokenSequenceNumber_ = 0;
        onChanged();
        return this;
      }

      // optional uint32 numKeys = 3;
      private int numKeys_ ;
      /**
       * <code>optional uint32 numKeys = 3;</code>
       */
      public boolean hasNumKeys() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      /**
       * <code>optional uint32 numKeys = 3;</code>
       */
      public int getNumKeys() {
        return numKeys_;
      }
      /**
       * <code>optional uint32 numKeys = 3;</code>
       */
      public Builder setNumKeys(int value) {
        bitField0_ |= 0x00000004;
        numKeys_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint32 numKeys = 3;</code>
       */
      public Builder clearNumKeys() {
        bitField0_ = (bitField0_ & ~0x00000004);
        numKeys_ = 0;
        onChanged();
        return this;
      }

      // optional uint32 numTokens = 4;
      private int numTokens_ ;
      /**
       * <code>optional uint32 numTokens = 4;</code>
       *
       * <pre>
       * repeated DelegationKey keys
       * repeated PersistToken tokens
       * </pre>
       */
      public boolean hasNumTokens() {
        return ((bitField0_ & 0x00000008) == 0x00000008);
      }
      /**
       * <code>optional uint32 numTokens = 4;</code>
       *
       * <pre>
       * repeated DelegationKey keys
       * repeated PersistToken tokens
       * </pre>
       */
      public int getNumTokens() {
        return numTokens_;
      }
      /**
       * <code>optional uint32 numTokens = 4;</code>
       *
       * <pre>
       * repeated DelegationKey keys
       * repeated PersistToken tokens
       * </pre>
       */
      public Builder setNumTokens(int value) {
        bitField0_ |= 0x00000008;
        numTokens_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint32 numTokens = 4;</code>
       *
       * <pre>
       * repeated DelegationKey keys
       * repeated PersistToken tokens
       * </pre>
       */
      public Builder clearNumTokens() {
        bitField0_ = (bitField0_ & ~0x00000008);
        numTokens_ = 0;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SecretManagerSection)
    }

    static {
      defaultInstance = new SecretManagerSection(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SecretManagerSection)
  }
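
  // NOTE (editorial annotation, not protoc output): a minimal usage sketch
  // for the SecretManagerSection builder API above, assuming the usual
  // generated setters. The field values are illustrative placeholders, not
  // values from a real fsimage; numKeys/numTokens merely announce how many
  // DelegationKey/PersistToken records follow the section header in the
  // image stream.
  //
  //   FsImageProto.SecretManagerSection section =
  //       FsImageProto.SecretManagerSection.newBuilder()
  //           .setCurrentId(1)
  //           .setTokenSequenceNumber(42)
  //           .setNumKeys(2)    // two DelegationKey records follow
  //           .setNumTokens(3)  // three PersistToken records follow
  //           .build();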

  public interface CacheManagerSectionOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required uint64 nextDirectiveId = 1;
    /**
     * <code>required uint64 nextDirectiveId = 1;</code>
     */
    boolean hasNextDirectiveId();
    /**
     * <code>required uint64 nextDirectiveId = 1;</code>
     */
    long getNextDirectiveId();

    // required uint32 numPools = 2;
    /**
     * <code>required uint32 numPools = 2;</code>
     */
    boolean hasNumPools();
    /**
     * <code>required uint32 numPools = 2;</code>
     */
    int getNumPools();

    // required uint32 numDirectives = 3;
    /**
     * <code>required uint32 numDirectives = 3;</code>
     *
     * <pre>
     * repeated CachePoolInfoProto pools
     * repeated CacheDirectiveInfoProto directives
     * </pre>
     */
    boolean hasNumDirectives();
    /**
     * <code>required uint32 numDirectives = 3;</code>
     *
     * <pre>
     * repeated CachePoolInfoProto pools
     * repeated CacheDirectiveInfoProto directives
     * </pre>
     */
    int getNumDirectives();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.fsimage.CacheManagerSection}
   */
  public static final class CacheManagerSection extends
      com.google.protobuf.GeneratedMessage
      implements CacheManagerSectionOrBuilder {
    // Use CacheManagerSection.newBuilder() to construct.
    private CacheManagerSection(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private CacheManagerSection(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final CacheManagerSection defaultInstance;
    public static CacheManagerSection getDefaultInstance() {
      return defaultInstance;
    }

    public CacheManagerSection getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private CacheManagerSection(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 8: {
              bitField0_ |= 0x00000001;
              nextDirectiveId_ = input.readUInt64();
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              numPools_ = input.readUInt32();
              break;
            }
            case 24: {
              bitField0_ |= 0x00000004;
              numDirectives_ = input.readUInt32();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
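    // NOTE (editorial annotation, not protoc output): the case labels in the
    // parsing switch above are wire-format tags, computed as
    // (fieldNumber << 3) | wireType. All three fields use wireType 0
    // (varint), so field 1 -> (1 << 3) | 0 = 8, field 2 -> 16, and
    // field 3 -> 24, which is why nextDirectiveId, numPools and
    // numDirectives are decoded under tags 8, 16 and 24, and tag 0 marks
    // end of input.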
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_CacheManagerSection_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_CacheManagerSection_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.Builder.class);
    }

    public static com.google.protobuf.Parser<CacheManagerSection> PARSER =
        new com.google.protobuf.AbstractParser<CacheManagerSection>() {
      public CacheManagerSection parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new CacheManagerSection(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<CacheManagerSection> getParserForType() {
      return PARSER;
    }

    private int bitField0_;
    // required uint64 nextDirectiveId = 1;
    public static final int NEXTDIRECTIVEID_FIELD_NUMBER = 1;
    private long nextDirectiveId_;
    /**
     * <code>required uint64 nextDirectiveId = 1;</code>
     */
    public boolean hasNextDirectiveId() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required uint64 nextDirectiveId = 1;</code>
     */
    public long getNextDirectiveId() {
      return nextDirectiveId_;
    }

    // required uint32 numPools = 2;
    public static final int NUMPOOLS_FIELD_NUMBER = 2;
    private int numPools_;
    /**
     * <code>required uint32 numPools = 2;</code>
     */
    public boolean hasNumPools() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required uint32 numPools = 2;</code>
     */
    public int getNumPools() {
      return numPools_;
    }

    // required uint32 numDirectives = 3;
    public static final int NUMDIRECTIVES_FIELD_NUMBER = 3;
    private int numDirectives_;
    /**
     * <code>required uint32 numDirectives = 3;</code>
     *
     * <pre>
     * repeated CachePoolInfoProto pools
     * repeated CacheDirectiveInfoProto directives
     * </pre>
     */
    public boolean hasNumDirectives() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>required uint32 numDirectives = 3;</code>
     *
     * <pre>
     * repeated CachePoolInfoProto pools
     * repeated CacheDirectiveInfoProto directives
     * </pre>
     */
    public int getNumDirectives() {
      return numDirectives_;
    }

    private void initFields() {
      nextDirectiveId_ = 0L;
      numPools_ = 0;
      numDirectives_ = 0;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasNextDirectiveId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasNumPools()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasNumDirectives()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
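    // NOTE (editorial annotation, not protoc output): all three fields of
    // this message are declared `required`, so isInitialized() returns
    // false until each has been set. A quick sketch:
    //
    //   CacheManagerSection.newBuilder()
    //       .buildPartial().isInitialized();               // false
    //   CacheManagerSection.newBuilder()
    //       .setNextDirectiveId(1L).setNumPools(0).setNumDirectives(0)
    //       .buildPartial().isInitialized();               // true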

    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeUInt64(1, nextDirectiveId_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt32(2, numPools_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeUInt32(3, numDirectives_);
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(1, nextDirectiveId_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt32Size(2, numPools_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt32Size(3, numDirectives_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.CacheManagerSection}
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSectionOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_CacheManagerSection_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_CacheManagerSection_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        nextDirectiveId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000001);
        numPools_ = 0;
        bitField0_ = (bitField0_ & ~0x00000002);
        numDirectives_ = 0;
        bitField0_ = (bitField0_ & ~0x00000004);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_CacheManagerSection_descriptor;
      }

      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection build() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection buildPartial() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.nextDirectiveId_ = nextDirectiveId_;
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.numPools_ = numPools_;
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.numDirectives_ = numDirectives_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection) {
          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection other) {
        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.getDefaultInstance()) return this;
        if (other.hasNextDirectiveId()) {
          setNextDirectiveId(other.getNextDirectiveId());
        }
        if (other.hasNumPools()) {
          setNumPools(other.getNumPools());
        }
        if (other.hasNumDirectives()) {
          setNumDirectives(other.getNumDirectives());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        if (!hasNextDirectiveId()) {
          return false;
        }
        if (!hasNumPools()) {
          return false;
        }
        if (!hasNumDirectives()) {
          return false;
        }
        return true;
      }

      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      // required uint64 nextDirectiveId = 1;
      private long nextDirectiveId_ ;
      /**
       * <code>required uint64 nextDirectiveId = 1;</code>
       */
      public boolean hasNextDirectiveId() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required uint64 nextDirectiveId = 1;</code>
       */
      public long getNextDirectiveId() {
        return nextDirectiveId_;
      }
      /**
       * <code>required uint64 nextDirectiveId = 1;</code>
       */
      public Builder setNextDirectiveId(long value) {
        bitField0_ |= 0x00000001;
        nextDirectiveId_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 nextDirectiveId = 1;</code>
       */
      public Builder clearNextDirectiveId() {
        bitField0_ = (bitField0_ & ~0x00000001);
        nextDirectiveId_ = 0L;
        onChanged();
        return this;
      }

      // required uint32 numPools = 2;
      private int numPools_ ;
      /**
       * <code>required uint32 numPools = 2;</code>
       */
      public boolean hasNumPools() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required uint32 numPools = 2;</code>
       */
      public int getNumPools() {
        return numPools_;
      }
      /**
       * <code>required uint32 numPools = 2;</code>
       */
      public Builder setNumPools(int value) {
        bitField0_ |= 0x00000002;
        numPools_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 numPools = 2;</code>
       */
      public Builder clearNumPools() {
        bitField0_ = (bitField0_ & ~0x00000002);
        numPools_ = 0;
        onChanged();
        return this;
      }

      // required uint32 numDirectives = 3;
      private int numDirectives_ ;
      /**
       * <code>required uint32 numDirectives = 3;</code>
       *
       * <pre>
       * repeated CachePoolInfoProto pools
       * repeated CacheDirectiveInfoProto directives
       * </pre>
       */
      public boolean hasNumDirectives() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      /**
       * <code>required uint32 numDirectives = 3;</code>
       *
       * <pre>
       * repeated CachePoolInfoProto pools
       * repeated CacheDirectiveInfoProto directives
       * </pre>
       */
      public int getNumDirectives() {
        return numDirectives_;
      }
      /**
       * <code>required uint32 numDirectives = 3;</code>
       *
       * <pre>
       * repeated CachePoolInfoProto pools
       * repeated CacheDirectiveInfoProto directives
       * </pre>
       */
      public Builder setNumDirectives(int value) {
        bitField0_ |= 0x00000004;
        numDirectives_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 numDirectives = 3;</code>
       *
       * <pre>
       * repeated CachePoolInfoProto pools
       * repeated CacheDirectiveInfoProto directives
       * </pre>
       */
      public Builder clearNumDirectives() {
        bitField0_ = (bitField0_ & ~0x00000004);
        numDirectives_ = 0;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.CacheManagerSection)
    }

    static {
      defaultInstance = new CacheManagerSection(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.CacheManagerSection)
  }
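
  // NOTE (editorial annotation, not protoc output): a round-trip sketch for
  // CacheManagerSection using only methods defined or inherited above.
  // Stream and variable names are illustrative; build() throws if any
  // required field is unset.
  //
  //   FsImageProto.CacheManagerSection original =
  //       FsImageProto.CacheManagerSection.newBuilder()
  //           .setNextDirectiveId(1L)
  //           .setNumPools(0)
  //           .setNumDirectives(0)
  //           .build();
  //   java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
  //   original.writeDelimitedTo(out);  // length-prefixed, inherited method
  //   FsImageProto.CacheManagerSection parsed =
  //       FsImageProto.CacheManagerSection.parseDelimitedFrom(
  //           new java.io.ByteArrayInputStream(out.toByteArray()));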

  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_FileSummary_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_FileSummary_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_FileSummary_Section_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_FileSummary_Section_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_NameSystemSection_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_NameSystemSection_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_INode_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_INode_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SnapshotSection_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SnapshotSection_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_StringTableSection_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_StringTableSection_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SecretManagerSection_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_CacheManagerSection_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_CacheManagerSection_fieldAccessorTable;

  public static com.google.protobuf.Descriptors.FileDescriptor
      getDescriptor() {
    return descriptor;
  }
  private static com.google.protobuf.Descriptors.FileDescriptor
      descriptor;
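  // NOTE (editorial annotation, not protoc output): the file descriptor
  // below is rebuilt at class-load time from the serialized fsimage.proto
  // embedded in descriptorData, and the assigner wires up the per-message
  // descriptor and accessor-table fields declared above. A sketch of
  // reflective inspection using only the public getDescriptor() accessor:
  //
  //   for (com.google.protobuf.Descriptors.Descriptor d :
  //        FsImageProto.getDescriptor().getMessageTypes()) {
  //     System.out.println(d.getFullName()); // e.g. hadoop.hdfs.fsimage.FileSummary
  //   }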
24545  static {
24546    java.lang.String[] descriptorData = {
24547      "\n\rfsimage.proto\022\023hadoop.hdfs.fsimage\032\nhd" +
24548      "fs.proto\032\tacl.proto\032\013xattr.proto\"\277\001\n\013Fil" +
24549      "eSummary\022\025\n\rondiskVersion\030\001 \002(\r\022\025\n\rlayou" +
24550      "tVersion\030\002 \002(\r\022\r\n\005codec\030\003 \001(\t\022:\n\010section" +
24551      "s\030\004 \003(\0132(.hadoop.hdfs.fsimage.FileSummar" +
24552      "y.Section\0327\n\007Section\022\014\n\004name\030\001 \001(\t\022\016\n\006le" +
24553      "ngth\030\002 \001(\004\022\016\n\006offset\030\003 \001(\004\"\277\001\n\021NameSyste" +
24554      "mSection\022\023\n\013namespaceId\030\001 \001(\r\022\022\n\ngenstam" +
24555      "pV1\030\002 \001(\004\022\022\n\ngenstampV2\030\003 \001(\004\022\027\n\017genstam" +
24556      "pV1Limit\030\004 \001(\004\022\034\n\024lastAllocatedBlockId\030\005",
24557      " \001(\004\022\025\n\rtransactionId\030\006 \001(\004\022\037\n\027rollingUp" +
24558      "gradeStartTime\030\007 \001(\004\"\221\r\n\014INodeSection\022\023\n" +
24559      "\013lastInodeId\030\001 \001(\004\022\021\n\tnumInodes\030\002 \001(\004\032I\n" +
24560      "\034FileUnderConstructionFeature\022\022\n\nclientN" +
24561      "ame\030\001 \001(\t\022\025\n\rclientMachine\030\002 \001(\t\032&\n\017AclF" +
24562      "eatureProto\022\023\n\007entries\030\002 \003(\007B\002\020\001\0320\n\021XAtt" +
24563      "rCompactProto\022\014\n\004name\030\001 \002(\007\022\r\n\005value\030\002 \001" +
24564      "(\014\032X\n\021XAttrFeatureProto\022C\n\006xAttrs\030\001 \003(\0132" +
24565      "3.hadoop.hdfs.fsimage.INodeSection.XAttr" +
24566      "CompactProto\032\225\003\n\tINodeFile\022\023\n\013replicatio",
24567      "n\030\001 \001(\r\022\030\n\020modificationTime\030\002 \001(\004\022\022\n\nacc" +
24568      "essTime\030\003 \001(\004\022\032\n\022preferredBlockSize\030\004 \001(" +
24569      "\004\022\022\n\npermission\030\005 \001(\006\022\'\n\006blocks\030\006 \003(\0132\027." +
24570      "hadoop.hdfs.BlockProto\022N\n\006fileUC\030\007 \001(\0132>" +
24571      ".hadoop.hdfs.fsimage.INodeSection.FileUn" +
24572      "derConstructionFeature\022>\n\003acl\030\010 \001(\01321.ha" +
24573      "doop.hdfs.fsimage.INodeSection.AclFeatur" +
24574      "eProto\022C\n\006xAttrs\030\t \001(\01323.hadoop.hdfs.fsi" +
24575      "mage.INodeSection.XAttrFeatureProto\022\027\n\017s" +
24576      "toragePolicyID\030\n \001(\r\032a\n\034QuotaByStorageTy",
24577      "peEntryProto\0222\n\013storageType\030\001 \002(\0162\035.hado" +
24578      "op.hdfs.StorageTypeProto\022\r\n\005quota\030\002 \002(\004\032" +
24579      "p\n\036QuotaByStorageTypeFeatureProto\022N\n\006quo" +
24580      "tas\030\001 \003(\0132>.hadoop.hdfs.fsimage.INodeSec" +
24581      "tion.QuotaByStorageTypeEntryProto\032\273\002\n\016IN" +
24582      "odeDirectory\022\030\n\020modificationTime\030\001 \001(\004\022\017" +
24583      "\n\007nsQuota\030\002 \001(\004\022\017\n\007dsQuota\030\003 \001(\004\022\022\n\nperm" +
24584      "ission\030\004 \001(\006\022>\n\003acl\030\005 \001(\01321.hadoop.hdfs." +
24585      "fsimage.INodeSection.AclFeatureProto\022C\n\006" +
24586      "xAttrs\030\006 \001(\01323.hadoop.hdfs.fsimage.INode",
24587      "Section.XAttrFeatureProto\022T\n\ntypeQuotas\030" +
24588      "\007 \001(\0132@.hadoop.hdfs.fsimage.INodeSection" +
24589      ".QuotaByStorageTypeFeatureProto\032`\n\014INode" +
24590      "Symlink\022\022\n\npermission\030\001 \001(\006\022\016\n\006target\030\002 " +
24591      "\001(\014\022\030\n\020modificationTime\030\003 \001(\004\022\022\n\naccessT" +
24592      "ime\030\004 \001(\004\032\314\002\n\005INode\022:\n\004type\030\001 \002(\0162,.hado" +
24593      "op.hdfs.fsimage.INodeSection.INode.Type\022" +
24594      "\n\n\002id\030\002 \002(\004\022\014\n\004name\030\003 \001(\014\0229\n\004file\030\004 \001(\0132" +
24595      "+.hadoop.hdfs.fsimage.INodeSection.INode" +
24596      "File\022C\n\tdirectory\030\005 \001(\01320.hadoop.hdfs.fs",
24597      "image.INodeSection.INodeDirectory\022?\n\007sym" +
24598      "link\030\006 \001(\0132..hadoop.hdfs.fsimage.INodeSe" +
24599      "ction.INodeSymlink\",\n\004Type\022\010\n\004FILE\020\001\022\r\n\t" +
24600      "DIRECTORY\020\002\022\013\n\007SYMLINK\020\003\"`\n\035FilesUnderCo" +
24601      "nstructionSection\032?\n\032FileUnderConstructi" +
24602      "onEntry\022\017\n\007inodeId\030\001 \001(\004\022\020\n\010fullPath\030\002 \001" +
24603      "(\t\"b\n\025INodeDirectorySection\032I\n\010DirEntry\022" +
24604      "\016\n\006parent\030\001 \001(\004\022\024\n\010children\030\002 \003(\004B\002\020\001\022\027\n" +
24605      "\013refChildren\030\003 \003(\rB\002\020\001\"z\n\025INodeReference" +
24606      "Section\032a\n\016INodeReference\022\022\n\nreferredId\030",
24607      "\001 \001(\004\022\014\n\004name\030\002 \001(\014\022\025\n\rdstSnapshotId\030\003 \001" +
24608      "(\r\022\026\n\016lastSnapshotId\030\004 \001(\r\"\265\001\n\017SnapshotS" +
24609      "ection\022\027\n\017snapshotCounter\030\001 \001(\r\022\034\n\020snaps" +
24610      "hottableDir\030\002 \003(\004B\002\020\001\022\024\n\014numSnapshots\030\003 " +
24611      "\001(\r\032U\n\010Snapshot\022\022\n\nsnapshotId\030\001 \001(\r\0225\n\004r" +
24612      "oot\030\002 \001(\0132\'.hadoop.hdfs.fsimage.INodeSec" +
24613      "tion.INode\"\200\005\n\023SnapshotDiffSection\032 \n\020Cr" +
24614      "eatedListEntry\022\014\n\004name\030\001 \001(\014\032\367\001\n\rDirecto" +
24615      "ryDiff\022\022\n\nsnapshotId\030\001 \001(\r\022\024\n\014childrenSi" +
24616      "ze\030\002 \001(\r\022\026\n\016isSnapshotRoot\030\003 \001(\010\022\014\n\004name",
24617      "\030\004 \001(\014\022F\n\014snapshotCopy\030\005 \001(\01320.hadoop.hd" +
24618      "fs.fsimage.INodeSection.INodeDirectory\022\027" +
24619      "\n\017createdListSize\030\006 \001(\r\022\030\n\014deletedINode\030" +
24620      "\007 \003(\004B\002\020\001\022\033\n\017deletedINodeRef\030\010 \003(\rB\002\020\001\032\252" +
24621      "\001\n\010FileDiff\022\022\n\nsnapshotId\030\001 \001(\r\022\020\n\010fileS" +
24622      "ize\030\002 \001(\004\022\014\n\004name\030\003 \001(\014\022A\n\014snapshotCopy\030" +
24623      "\004 \001(\0132+.hadoop.hdfs.fsimage.INodeSection" +
24624      ".INodeFile\022\'\n\006blocks\030\005 \003(\0132\027.hadoop.hdfs" +
24625      ".BlockProto\032\237\001\n\tDiffEntry\022E\n\004type\030\001 \002(\0162" +
24626      "7.hadoop.hdfs.fsimage.SnapshotDiffSectio",
24627      "n.DiffEntry.Type\022\017\n\007inodeId\030\002 \001(\004\022\021\n\tnum" +
24628      "OfDiff\030\003 \001(\r\"\'\n\004Type\022\014\n\010FILEDIFF\020\001\022\021\n\rDI" +
24629      "RECTORYDIFF\020\002\"H\n\022StringTableSection\022\020\n\010n" +
24630      "umEntry\030\001 \001(\r\032 \n\005Entry\022\n\n\002id\030\001 \001(\r\022\013\n\003st" +
24631      "r\030\002 \001(\t\"\341\002\n\024SecretManagerSection\022\021\n\tcurr" +
24632      "entId\030\001 \001(\r\022\033\n\023tokenSequenceNumber\030\002 \001(\r" +
24633      "\022\017\n\007numKeys\030\003 \001(\r\022\021\n\tnumTokens\030\004 \001(\r\032<\n\r" +
24634      "DelegationKey\022\n\n\002id\030\001 \001(\r\022\022\n\nexpiryDate\030" +
24635      "\002 \001(\004\022\013\n\003key\030\003 \001(\014\032\266\001\n\014PersistToken\022\017\n\007v" +
24636      "ersion\030\001 \001(\r\022\r\n\005owner\030\002 \001(\t\022\017\n\007renewer\030\003",
24637      " \001(\t\022\020\n\010realUser\030\004 \001(\t\022\021\n\tissueDate\030\005 \001(" +
24638      "\004\022\017\n\007maxDate\030\006 \001(\004\022\026\n\016sequenceNumber\030\007 \001" +
24639      "(\r\022\023\n\013masterKeyId\030\010 \001(\r\022\022\n\nexpiryDate\030\t " +
24640      "\001(\004\"W\n\023CacheManagerSection\022\027\n\017nextDirect" +
24641      "iveId\030\001 \002(\004\022\020\n\010numPools\030\002 \002(\r\022\025\n\rnumDire" +
24642      "ctives\030\003 \002(\rB6\n&org.apache.hadoop.hdfs.s" +
24643      "erver.namenodeB\014FsImageProto"
24644    };
24645    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
24646      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
24647        public com.google.protobuf.ExtensionRegistry assignDescriptors(
24648            com.google.protobuf.Descriptors.FileDescriptor root) {
24649          descriptor = root;
24650          internal_static_hadoop_hdfs_fsimage_FileSummary_descriptor =
24651            getDescriptor().getMessageTypes().get(0);
24652          internal_static_hadoop_hdfs_fsimage_FileSummary_fieldAccessorTable = new
24653            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
24654              internal_static_hadoop_hdfs_fsimage_FileSummary_descriptor,
24655              new java.lang.String[] { "OndiskVersion", "LayoutVersion", "Codec", "Sections", });
24656          internal_static_hadoop_hdfs_fsimage_FileSummary_Section_descriptor =
24657            internal_static_hadoop_hdfs_fsimage_FileSummary_descriptor.getNestedTypes().get(0);
24658          internal_static_hadoop_hdfs_fsimage_FileSummary_Section_fieldAccessorTable = new
24659            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
24660              internal_static_hadoop_hdfs_fsimage_FileSummary_Section_descriptor,
24661              new java.lang.String[] { "Name", "Length", "Offset", });
24662          internal_static_hadoop_hdfs_fsimage_NameSystemSection_descriptor =
24663            getDescriptor().getMessageTypes().get(1);
24664          internal_static_hadoop_hdfs_fsimage_NameSystemSection_fieldAccessorTable = new
24665            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
24666              internal_static_hadoop_hdfs_fsimage_NameSystemSection_descriptor,
24667              new java.lang.String[] { "NamespaceId", "GenstampV1", "GenstampV2", "GenstampV1Limit", "LastAllocatedBlockId", "TransactionId", "RollingUpgradeStartTime", });
24668          internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor =
24669            getDescriptor().getMessageTypes().get(2);
24670          internal_static_hadoop_hdfs_fsimage_INodeSection_fieldAccessorTable = new
24671            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
24672              internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor,
24673              new java.lang.String[] { "LastInodeId", "NumInodes", });
24674          internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_descriptor =
24675            internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(0);
24676          internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_fieldAccessorTable = new
24677            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
24678              internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_descriptor,
24679              new java.lang.String[] { "ClientName", "ClientMachine", });
24680          internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_descriptor =
24681            internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(1);
24682          internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_fieldAccessorTable = new
24683            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
24684              internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_descriptor,
24685              new java.lang.String[] { "Entries", });
24686          internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_descriptor =
24687            internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(2);
24688          internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_fieldAccessorTable = new
24689            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
24690              internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_descriptor,
24691              new java.lang.String[] { "Name", "Value", });
24692          internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_descriptor =
24693            internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(3);
24694          internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_fieldAccessorTable = new
24695            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
24696              internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_descriptor,
24697              new java.lang.String[] { "XAttrs", });
24698          internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_descriptor =
24699            internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(4);
24700          internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_fieldAccessorTable = new
24701            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
24702              internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_descriptor,
24703              new java.lang.String[] { "Replication", "ModificationTime", "AccessTime", "PreferredBlockSize", "Permission", "Blocks", "FileUC", "Acl", "XAttrs", "StoragePolicyID", });
24704          internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_descriptor =
24705            internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(5);
24706          internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_fieldAccessorTable = new
24707            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
24708              internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_descriptor,
24709              new java.lang.String[] { "StorageType", "Quota", });
24710          internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_descriptor =
24711            internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(6);
24712          internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_fieldAccessorTable = new
24713            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
24714              internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_descriptor,
24715              new java.lang.String[] { "Quotas", });
24716          internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_descriptor =
24717            internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(7);
24718          internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_fieldAccessorTable = new
24719            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
24720              internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_descriptor,
24721              new java.lang.String[] { "ModificationTime", "NsQuota", "DsQuota", "Permission", "Acl", "XAttrs", "TypeQuotas", });
24722          internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_descriptor =
24723            internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(8);
24724          internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_fieldAccessorTable = new
24725            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
24726              internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_descriptor,
24727              new java.lang.String[] { "Permission", "Target", "ModificationTime", "AccessTime", });
24728          internal_static_hadoop_hdfs_fsimage_INodeSection_INode_descriptor =
24729            internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(9);
24730          internal_static_hadoop_hdfs_fsimage_INodeSection_INode_fieldAccessorTable = new
24731            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
24732              internal_static_hadoop_hdfs_fsimage_INodeSection_INode_descriptor,
24733              new java.lang.String[] { "Type", "Id", "Name", "File", "Directory", "Symlink", });
24734          internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_descriptor =
24735            getDescriptor().getMessageTypes().get(3);
24736          internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_fieldAccessorTable = new
24737            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
24738              internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_descriptor,
24739              new java.lang.String[] { });
24740          internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_descriptor =
24741            internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_descriptor.getNestedTypes().get(0);
24742          internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_fieldAccessorTable = new
24743            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
24744              internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_descriptor,
24745              new java.lang.String[] { "InodeId", "FullPath", });
24746          internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_descriptor =
24747            getDescriptor().getMessageTypes().get(4);
24748          internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_fieldAccessorTable = new
24749            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
24750              internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_descriptor,
24751              new java.lang.String[] { });
          internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_descriptor =
            internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_descriptor.getNestedTypes().get(0);
          internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_descriptor,
              new java.lang.String[] { "Parent", "Children", "RefChildren", });
          internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_descriptor =
            getDescriptor().getMessageTypes().get(5);
          internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_descriptor,
              new java.lang.String[] { });
          internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_descriptor =
            internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_descriptor.getNestedTypes().get(0);
          internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_descriptor,
              new java.lang.String[] { "ReferredId", "Name", "DstSnapshotId", "LastSnapshotId", });
          internal_static_hadoop_hdfs_fsimage_SnapshotSection_descriptor =
            getDescriptor().getMessageTypes().get(6);
          internal_static_hadoop_hdfs_fsimage_SnapshotSection_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_fsimage_SnapshotSection_descriptor,
              new java.lang.String[] { "SnapshotCounter", "SnapshottableDir", "NumSnapshots", });
          internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_descriptor =
            internal_static_hadoop_hdfs_fsimage_SnapshotSection_descriptor.getNestedTypes().get(0);
          internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_descriptor,
              new java.lang.String[] { "SnapshotId", "Root", });
          internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor =
            getDescriptor().getMessageTypes().get(7);
          internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor,
              new java.lang.String[] { });
          internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_descriptor =
            internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor.getNestedTypes().get(0);
          internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_descriptor,
              new java.lang.String[] { "Name", });
          internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_descriptor =
            internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor.getNestedTypes().get(1);
          internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_descriptor,
              new java.lang.String[] { "SnapshotId", "ChildrenSize", "IsSnapshotRoot", "Name", "SnapshotCopy", "CreatedListSize", "DeletedINode", "DeletedINodeRef", });
          internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_descriptor =
            internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor.getNestedTypes().get(2);
          internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_descriptor,
              new java.lang.String[] { "SnapshotId", "FileSize", "Name", "SnapshotCopy", "Blocks", });
          internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_descriptor =
            internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor.getNestedTypes().get(3);
          internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_descriptor,
              new java.lang.String[] { "Type", "InodeId", "NumOfDiff", });
          internal_static_hadoop_hdfs_fsimage_StringTableSection_descriptor =
            getDescriptor().getMessageTypes().get(8);
          internal_static_hadoop_hdfs_fsimage_StringTableSection_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_fsimage_StringTableSection_descriptor,
              new java.lang.String[] { "NumEntry", });
          internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_descriptor =
            internal_static_hadoop_hdfs_fsimage_StringTableSection_descriptor.getNestedTypes().get(0);
          internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_descriptor,
              new java.lang.String[] { "Id", "Str", });
          internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor =
            getDescriptor().getMessageTypes().get(9);
          internal_static_hadoop_hdfs_fsimage_SecretManagerSection_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor,
              new java.lang.String[] { "CurrentId", "TokenSequenceNumber", "NumKeys", "NumTokens", });
          internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_descriptor =
            internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor.getNestedTypes().get(0);
          internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_descriptor,
              new java.lang.String[] { "Id", "ExpiryDate", "Key", });
          internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_descriptor =
            internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor.getNestedTypes().get(1);
          internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_descriptor,
              new java.lang.String[] { "Version", "Owner", "Renewer", "RealUser", "IssueDate", "MaxDate", "SequenceNumber", "MasterKeyId", "ExpiryDate", });
          internal_static_hadoop_hdfs_fsimage_CacheManagerSection_descriptor =
            getDescriptor().getMessageTypes().get(10);
          internal_static_hadoop_hdfs_fsimage_CacheManagerSection_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_fsimage_CacheManagerSection_descriptor,
              new java.lang.String[] { "NextDirectiveId", "NumPools", "NumDirectives", });
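          // fsimage.proto defines no extensions (registerAllExtensions above
          // is empty), so there is no ExtensionRegistry to hand back to the
          // descriptor builder.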
          return null;
        }
      };
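    // Parse the serialized FileDescriptorProto held in descriptorData,
    // cross-linking it against the HDFS, ACL, and XAttr proto files that
    // fsimage.proto imports; once built, the assigner above caches every
    // descriptor and accessor table for this class.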
    com.google.protobuf.Descriptors.FileDescriptor
      .internalBuildGeneratedFileFrom(descriptorData,
        new com.google.protobuf.Descriptors.FileDescriptor[] {
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
          org.apache.hadoop.hdfs.protocol.proto.AclProtos.getDescriptor(),
          org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.getDescriptor(),
        }, assigner);
  }

  // @@protoc_insertion_point(outer_class_scope)
}
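
// Illustrative usage (not part of the generated output): a minimal sketch of
// decoding a FileSummary once its bytes have been extracted from the tail of
// an fsimage file. The file name "summary.bin" is hypothetical, and Section's
// name/length/offset accessors follow the Section message in fsimage.proto.
//
//   import java.nio.file.Files;
//   import java.nio.file.Paths;
//
//   byte[] raw = Files.readAllBytes(Paths.get("summary.bin"));
//   FsImageProto.FileSummary summary =
//       FsImageProto.FileSummary.parseFrom(raw);
//   System.out.println("ondiskVersion=" + summary.getOndiskVersion()
//       + " layoutVersion=" + summary.getLayoutVersion());
//   for (FsImageProto.FileSummary.Section s : summary.getSectionsList()) {
//     System.out.println(s.getName() + " offset=" + s.getOffset()
//         + " length=" + s.getLength());
//   }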