// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: fsimage.proto

// Protobuf Java Version: 3.25.5
package org.apache.hadoop.hdfs.server.namenode;

public final class FsImageProto {
  private FsImageProto() {}
  public static void registerAllExtensions(
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite registry) {
  }

  public static void registerAllExtensions(
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry registry) {
    registerAllExtensions(
        (org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite) registry);
  }
  public interface FileSummaryOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.FileSummary)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * The version of the above EBNF grammars.
     * </pre>
     *
     * <code>required uint32 ondiskVersion = 1;</code>
     * @return Whether the ondiskVersion field is set.
     */
    boolean hasOndiskVersion();
    /**
     * <pre>
     * The version of the above EBNF grammars.
     * </pre>
     *
     * <code>required uint32 ondiskVersion = 1;</code>
     * @return The ondiskVersion.
     */
    int getOndiskVersion();

    /**
     * <pre>
     * layoutVersion describes which features are available in the
     * FSImage.
     * </pre>
     *
     * <code>required uint32 layoutVersion = 2;</code>
     * @return Whether the layoutVersion field is set.
     */
    boolean hasLayoutVersion();
    /**
     * <pre>
     * layoutVersion describes which features are available in the
     * FSImage.
     * </pre>
     *
     * <code>required uint32 layoutVersion = 2;</code>
     * @return The layoutVersion.
     */
    int getLayoutVersion();

    /**
     * <code>optional string codec = 3;</code>
     * @return Whether the codec field is set.
     */
    boolean hasCodec();
    /**
     * <code>optional string codec = 3;</code>
     * @return The codec.
     */
    java.lang.String getCodec();
    /**
     * <code>optional string codec = 3;</code>
     * @return The bytes for codec.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getCodecBytes();

    /**
     * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section> 
        getSectionsList();
    /**
     * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
     */
    org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section getSections(int index);
    /**
     * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
     */
    int getSectionsCount();
    /**
     * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder> 
        getSectionsOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
     */
    org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder getSectionsOrBuilder(
        int index);
  }
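
  // Reader-side sketch (illustrative only, using a hypothetical FileSummary
  // instance named "summary"): optional fields such as codec should be
  // guarded by their hasX() accessor before reading, e.g.
  //
  //   String codec = summary.hasCodec() ? summary.getCodec() : "";
  //   int sectionCount = summary.getSectionsCount();
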
  /**
   * Protobuf type {@code hadoop.hdfs.fsimage.FileSummary}
   */
  public static final class FileSummary extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.FileSummary)
      FileSummaryOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use FileSummary.newBuilder() to construct.
    private FileSummary(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private FileSummary() {
      codec_ = "";
      sections_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new FileSummary();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Builder.class);
    }

    public interface SectionOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.FileSummary.Section)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <code>optional string name = 1;</code>
       * @return Whether the name field is set.
       */
      boolean hasName();
      /**
       * <code>optional string name = 1;</code>
       * @return The name.
       */
      java.lang.String getName();
      /**
       * <code>optional string name = 1;</code>
       * @return The bytes for name.
       */
      org.apache.hadoop.thirdparty.protobuf.ByteString
          getNameBytes();

      /**
       * <code>optional uint64 length = 2;</code>
       * @return Whether the length field is set.
       */
      boolean hasLength();
      /**
       * <code>optional uint64 length = 2;</code>
       * @return The length.
       */
      long getLength();

      /**
       * <code>optional uint64 offset = 3;</code>
       * @return Whether the offset field is set.
       */
      boolean hasOffset();
      /**
       * <code>optional uint64 offset = 3;</code>
       * @return The offset.
       */
      long getOffset();
    }
    /**
     * <pre>
     * index for each section
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.fsimage.FileSummary.Section}
     */
    public static final class Section extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.FileSummary.Section)
        SectionOrBuilder {
    private static final long serialVersionUID = 0L;
      // Use Section.newBuilder() to construct.
      private Section(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private Section() {
        name_ = "";
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new Section();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_Section_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_Section_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder.class);
      }

      private int bitField0_;
      public static final int NAME_FIELD_NUMBER = 1;
      @SuppressWarnings("serial")
      private volatile java.lang.Object name_ = "";
      /**
       * <code>optional string name = 1;</code>
       * @return Whether the name field is set.
       */
      @java.lang.Override
      public boolean hasName() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional string name = 1;</code>
       * @return The name.
       */
      @java.lang.Override
      public java.lang.String getName() {
        java.lang.Object ref = name_;
        if (ref instanceof java.lang.String) {
          return (java.lang.String) ref;
        } else {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            name_ = s;
          }
          return s;
        }
      }
      /**
       * <code>optional string name = 1;</code>
       * @return The bytes for name.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getNameBytes() {
        java.lang.Object ref = name_;
        if (ref instanceof java.lang.String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          name_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }

      public static final int LENGTH_FIELD_NUMBER = 2;
      private long length_ = 0L;
      /**
       * <code>optional uint64 length = 2;</code>
       * @return Whether the length field is set.
       */
      @java.lang.Override
      public boolean hasLength() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional uint64 length = 2;</code>
       * @return The length.
       */
      @java.lang.Override
      public long getLength() {
        return length_;
      }

      public static final int OFFSET_FIELD_NUMBER = 3;
      private long offset_ = 0L;
      /**
       * <code>optional uint64 offset = 3;</code>
       * @return Whether the offset field is set.
       */
      @java.lang.Override
      public boolean hasOffset() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional uint64 offset = 3;</code>
       * @return The offset.
       */
      @java.lang.Override
      public long getOffset() {
        return offset_;
      }

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        memoizedIsInitialized = 1;
        return true;
      }

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        if (((bitField0_ & 0x00000001) != 0)) {
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, name_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          output.writeUInt64(2, length_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          output.writeUInt64(3, offset_);
        }
        getUnknownFields().writeTo(output);
      }

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, name_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64Size(2, length_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64Size(3, offset_);
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
         return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section) obj;

        if (hasName() != other.hasName()) return false;
        if (hasName()) {
          if (!getName()
              .equals(other.getName())) return false;
        }
        if (hasLength() != other.hasLength()) return false;
        if (hasLength()) {
          if (getLength()
              != other.getLength()) return false;
        }
        if (hasOffset() != other.hasOffset()) return false;
        if (hasOffset()) {
          if (getOffset()
              != other.getOffset()) return false;
        }
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (hasName()) {
          hash = (37 * hash) + NAME_FIELD_NUMBER;
          hash = (53 * hash) + getName().hashCode();
        }
        if (hasLength()) {
          hash = (37 * hash) + LENGTH_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getLength());
        }
        if (hasOffset()) {
          hash = (37 * hash) + OFFSET_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getOffset());
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * <pre>
       * index for each section
       * </pre>
       *
       * Protobuf type {@code hadoop.hdfs.fsimage.FileSummary.Section}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.FileSummary.Section)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_Section_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_Section_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.newBuilder()
        private Builder() {

        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);

        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          name_ = "";
          length_ = 0L;
          offset_ = 0L;
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_Section_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section(this);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section result) {
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) != 0)) {
            result.name_ = name_;
            to_bitField0_ |= 0x00000001;
          }
          if (((from_bitField0_ & 0x00000002) != 0)) {
            result.length_ = length_;
            to_bitField0_ |= 0x00000002;
          }
          if (((from_bitField0_ & 0x00000004) != 0)) {
            result.offset_ = offset_;
            to_bitField0_ |= 0x00000004;
          }
          result.bitField0_ |= to_bitField0_;
        }

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.getDefaultInstance()) return this;
          if (other.hasName()) {
            name_ = other.name_;
            bitField0_ |= 0x00000001;
            onChanged();
          }
          if (other.hasLength()) {
            setLength(other.getLength());
          }
          if (other.hasOffset()) {
            setOffset(other.getOffset());
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }

        @java.lang.Override
        public final boolean isInitialized() {
          return true;
        }

        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 10: {
                  name_ = input.readBytes();
                  bitField0_ |= 0x00000001;
                  break;
                } // case 10
                case 16: {
                  length_ = input.readUInt64();
                  bitField0_ |= 0x00000002;
                  break;
                } // case 16
                case 24: {
                  offset_ = input.readUInt64();
                  bitField0_ |= 0x00000004;
                  break;
                } // case 24
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
        private int bitField0_;

        private java.lang.Object name_ = "";
        /**
         * <code>optional string name = 1;</code>
         * @return Whether the name field is set.
         */
        public boolean hasName() {
          return ((bitField0_ & 0x00000001) != 0);
        }
        /**
         * <code>optional string name = 1;</code>
         * @return The name.
         */
        public java.lang.String getName() {
          java.lang.Object ref = name_;
          if (!(ref instanceof java.lang.String)) {
            org.apache.hadoop.thirdparty.protobuf.ByteString bs =
                (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
            java.lang.String s = bs.toStringUtf8();
            if (bs.isValidUtf8()) {
              name_ = s;
            }
            return s;
          } else {
            return (java.lang.String) ref;
          }
        }
        /**
         * <code>optional string name = 1;</code>
         * @return The bytes for name.
         */
        public org.apache.hadoop.thirdparty.protobuf.ByteString
            getNameBytes() {
          java.lang.Object ref = name_;
          if (ref instanceof String) {
            org.apache.hadoop.thirdparty.protobuf.ByteString b = 
                org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                    (java.lang.String) ref);
            name_ = b;
            return b;
          } else {
            return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          }
        }
        /**
         * <code>optional string name = 1;</code>
         * @param value The name to set.
         * @return This builder for chaining.
         */
        public Builder setName(
            java.lang.String value) {
          if (value == null) { throw new NullPointerException(); }
          name_ = value;
          bitField0_ |= 0x00000001;
          onChanged();
          return this;
        }
        /**
         * <code>optional string name = 1;</code>
         * @return This builder for chaining.
         */
        public Builder clearName() {
          name_ = getDefaultInstance().getName();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
          return this;
        }
        /**
         * <code>optional string name = 1;</code>
         * @param value The bytes for name to set.
         * @return This builder for chaining.
         */
        public Builder setNameBytes(
            org.apache.hadoop.thirdparty.protobuf.ByteString value) {
          if (value == null) { throw new NullPointerException(); }
          name_ = value;
          bitField0_ |= 0x00000001;
          onChanged();
          return this;
        }

        private long length_ ;
        /**
         * <code>optional uint64 length = 2;</code>
         * @return Whether the length field is set.
         */
        @java.lang.Override
        public boolean hasLength() {
          return ((bitField0_ & 0x00000002) != 0);
        }
        /**
         * <code>optional uint64 length = 2;</code>
         * @return The length.
         */
        @java.lang.Override
        public long getLength() {
          return length_;
        }
        /**
         * <code>optional uint64 length = 2;</code>
         * @param value The length to set.
         * @return This builder for chaining.
         */
        public Builder setLength(long value) {

          length_ = value;
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 length = 2;</code>
         * @return This builder for chaining.
         */
        public Builder clearLength() {
          bitField0_ = (bitField0_ & ~0x00000002);
          length_ = 0L;
          onChanged();
          return this;
        }

        private long offset_ ;
        /**
         * <code>optional uint64 offset = 3;</code>
         * @return Whether the offset field is set.
         */
        @java.lang.Override
        public boolean hasOffset() {
          return ((bitField0_ & 0x00000004) != 0);
        }
        /**
         * <code>optional uint64 offset = 3;</code>
         * @return The offset.
         */
        @java.lang.Override
        public long getOffset() {
          return offset_;
        }
        /**
         * <code>optional uint64 offset = 3;</code>
         * @param value The offset to set.
         * @return This builder for chaining.
         */
        public Builder setOffset(long value) {

          offset_ = value;
          bitField0_ |= 0x00000004;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 offset = 3;</code>
         * @return This builder for chaining.
         */
        public Builder clearOffset() {
          bitField0_ = (bitField0_ & ~0x00000004);
          offset_ = 0L;
          onChanged();
          return this;
        }
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.FileSummary.Section)
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.FileSummary.Section)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<Section>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<Section>() {
        @java.lang.Override
        public Section parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<Section> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<Section> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }
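
    // Usage sketch: the Section entries form the index used for random access
    // into an fsimage file. Assuming "summaryBytes" already holds the
    // serialized FileSummary (how those bytes are located is not shown here),
    // a reader might iterate the index like this:
    //
    //   FileSummary summary = FileSummary.parseFrom(summaryBytes);
    //   for (FileSummary.Section section : summary.getSectionsList()) {
    //     long offset = section.getOffset();  // byte offset of the section in the image
    //     long length = section.getLength();  // serialized length of the section
    //   }
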

    private int bitField0_;
    public static final int ONDISKVERSION_FIELD_NUMBER = 1;
    private int ondiskVersion_ = 0;
    /**
     * <pre>
     * The version of the above EBNF grammars.
     * </pre>
     *
     * <code>required uint32 ondiskVersion = 1;</code>
     * @return Whether the ondiskVersion field is set.
     */
    @java.lang.Override
    public boolean hasOndiskVersion() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <pre>
     * The version of the above EBNF grammars.
     * </pre>
     *
     * <code>required uint32 ondiskVersion = 1;</code>
     * @return The ondiskVersion.
     */
    @java.lang.Override
    public int getOndiskVersion() {
      return ondiskVersion_;
    }

    public static final int LAYOUTVERSION_FIELD_NUMBER = 2;
    private int layoutVersion_ = 0;
    /**
     * <pre>
     * layoutVersion describes which features are available in the
     * FSImage.
     * </pre>
     *
     * <code>required uint32 layoutVersion = 2;</code>
     * @return Whether the layoutVersion field is set.
     */
    @java.lang.Override
    public boolean hasLayoutVersion() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <pre>
     * layoutVersion describes which features are available in the
     * FSImage.
     * </pre>
     *
     * <code>required uint32 layoutVersion = 2;</code>
     * @return The layoutVersion.
     */
    @java.lang.Override
    public int getLayoutVersion() {
      return layoutVersion_;
    }

    public static final int CODEC_FIELD_NUMBER = 3;
    @SuppressWarnings("serial")
    private volatile java.lang.Object codec_ = "";
    /**
     * <code>optional string codec = 3;</code>
     * @return Whether the codec field is set.
     */
    @java.lang.Override
    public boolean hasCodec() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional string codec = 3;</code>
     * @return The codec.
     */
    @java.lang.Override
    public java.lang.String getCodec() {
      java.lang.Object ref = codec_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          codec_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string codec = 3;</code>
     * @return The bytes for codec.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getCodecBytes() {
      java.lang.Object ref = codec_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        codec_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int SECTIONS_FIELD_NUMBER = 4;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section> sections_;
    /**
     * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section> getSectionsList() {
      return sections_;
    }
    /**
     * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder> 
        getSectionsOrBuilderList() {
      return sections_;
    }
    /**
     * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
     */
    @java.lang.Override
    public int getSectionsCount() {
      return sections_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section getSections(int index) {
      return sections_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder getSectionsOrBuilder(
        int index) {
      return sections_.get(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasOndiskVersion()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasLayoutVersion()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt32(1, ondiskVersion_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt32(2, layoutVersion_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, codec_);
      }
      for (int i = 0; i < sections_.size(); i++) {
        output.writeMessage(4, sections_.get(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(1, ondiskVersion_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(2, layoutVersion_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, codec_);
      }
      for (int i = 0; i < sections_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(4, sections_.get(i));
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary) obj;

      if (hasOndiskVersion() != other.hasOndiskVersion()) return false;
      if (hasOndiskVersion()) {
        if (getOndiskVersion()
            != other.getOndiskVersion()) return false;
      }
      if (hasLayoutVersion() != other.hasLayoutVersion()) return false;
      if (hasLayoutVersion()) {
        if (getLayoutVersion()
            != other.getLayoutVersion()) return false;
      }
      if (hasCodec() != other.hasCodec()) return false;
      if (hasCodec()) {
        if (!getCodec()
            .equals(other.getCodec())) return false;
      }
      if (!getSectionsList()
          .equals(other.getSectionsList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasOndiskVersion()) {
        hash = (37 * hash) + ONDISKVERSION_FIELD_NUMBER;
        hash = (53 * hash) + getOndiskVersion();
      }
      if (hasLayoutVersion()) {
        hash = (37 * hash) + LAYOUTVERSION_FIELD_NUMBER;
        hash = (53 * hash) + getLayoutVersion();
      }
      if (hasCodec()) {
        hash = (37 * hash) + CODEC_FIELD_NUMBER;
        hash = (53 * hash) + getCodec().hashCode();
      }
      if (getSectionsCount() > 0) {
        hash = (37 * hash) + SECTIONS_FIELD_NUMBER;
        hash = (53 * hash) + getSectionsList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.FileSummary}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.FileSummary)
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummaryOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        ondiskVersion_ = 0;
        layoutVersion_ = 0;
        codec_ = "";
        if (sectionsBuilder_ == null) {
          sections_ = java.util.Collections.emptyList();
        } else {
          sections_ = null;
          sectionsBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000008);
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FileSummary_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary build() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary buildPartial() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary result) {
        if (sectionsBuilder_ == null) {
          if (((bitField0_ & 0x00000008) != 0)) {
            sections_ = java.util.Collections.unmodifiableList(sections_);
            bitField0_ = (bitField0_ & ~0x00000008);
          }
          result.sections_ = sections_;
        } else {
          result.sections_ = sectionsBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.ondiskVersion_ = ondiskVersion_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.layoutVersion_ = layoutVersion_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.codec_ = codec_;
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary) {
          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary other) {
        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.getDefaultInstance()) return this;
        if (other.hasOndiskVersion()) {
          setOndiskVersion(other.getOndiskVersion());
        }
        if (other.hasLayoutVersion()) {
          setLayoutVersion(other.getLayoutVersion());
        }
        if (other.hasCodec()) {
          codec_ = other.codec_;
          bitField0_ |= 0x00000004;
          onChanged();
        }
        if (sectionsBuilder_ == null) {
          if (!other.sections_.isEmpty()) {
            if (sections_.isEmpty()) {
              sections_ = other.sections_;
              bitField0_ = (bitField0_ & ~0x00000008);
            } else {
              ensureSectionsIsMutable();
              sections_.addAll(other.sections_);
            }
            onChanged();
          }
        } else {
          if (!other.sections_.isEmpty()) {
            if (sectionsBuilder_.isEmpty()) {
              sectionsBuilder_.dispose();
              sectionsBuilder_ = null;
              sections_ = other.sections_;
              bitField0_ = (bitField0_ & ~0x00000008);
              sectionsBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getSectionsFieldBuilder() : null;
            } else {
              sectionsBuilder_.addAllMessages(other.sections_);
            }
          }
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasOndiskVersion()) {
          return false;
        }
        if (!hasLayoutVersion()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                ondiskVersion_ = input.readUInt32();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 16: {
                layoutVersion_ = input.readUInt32();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 26: {
                codec_ = input.readBytes();
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              case 34: {
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.PARSER,
                        extensionRegistry);
                if (sectionsBuilder_ == null) {
                  ensureSectionsIsMutable();
                  sections_.add(m);
                } else {
                  sectionsBuilder_.addMessage(m);
                }
                break;
              } // case 34
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int ondiskVersion_ ;
      /**
       * <pre>
       * The version of the above EBNF grammars.
       * </pre>
       *
       * <code>required uint32 ondiskVersion = 1;</code>
       * @return Whether the ondiskVersion field is set.
       */
      @java.lang.Override
      public boolean hasOndiskVersion() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <pre>
       * The version of the above EBNF grammars.
       * </pre>
       *
       * <code>required uint32 ondiskVersion = 1;</code>
       * @return The ondiskVersion.
       */
      @java.lang.Override
      public int getOndiskVersion() {
        return ondiskVersion_;
      }
      /**
       * <pre>
       * The version of the above EBNF grammars.
       * </pre>
       *
       * <code>required uint32 ondiskVersion = 1;</code>
       * @param value The ondiskVersion to set.
       * @return This builder for chaining.
       */
      public Builder setOndiskVersion(int value) {

        ondiskVersion_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * The version of the above EBNF grammars.
       * </pre>
       *
       * <code>required uint32 ondiskVersion = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearOndiskVersion() {
        bitField0_ = (bitField0_ & ~0x00000001);
        ondiskVersion_ = 0;
        onChanged();
        return this;
      }

      private int layoutVersion_ ;
      /**
       * <pre>
       * layoutVersion describes which features are available in the
       * FSImage.
       * </pre>
       *
       * <code>required uint32 layoutVersion = 2;</code>
       * @return Whether the layoutVersion field is set.
       */
      @java.lang.Override
      public boolean hasLayoutVersion() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       * layoutVersion describes which features are available in the
       * FSImage.
       * </pre>
       *
       * <code>required uint32 layoutVersion = 2;</code>
       * @return The layoutVersion.
       */
      @java.lang.Override
      public int getLayoutVersion() {
        return layoutVersion_;
      }
      /**
       * <pre>
       * layoutVersion describes which features are available in the
       * FSImage.
       * </pre>
       *
       * <code>required uint32 layoutVersion = 2;</code>
       * @param value The layoutVersion to set.
       * @return This builder for chaining.
       */
      public Builder setLayoutVersion(int value) {

        layoutVersion_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * layoutVersion describes which features are available in the
       * FSImage.
       * </pre>
       *
       * <code>required uint32 layoutVersion = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearLayoutVersion() {
        bitField0_ = (bitField0_ & ~0x00000002);
        layoutVersion_ = 0;
        onChanged();
        return this;
      }

      private java.lang.Object codec_ = "";
      /**
       * <code>optional string codec = 3;</code>
       * @return Whether the codec field is set.
       */
      public boolean hasCodec() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional string codec = 3;</code>
       * @return The codec.
       */
      public java.lang.String getCodec() {
        java.lang.Object ref = codec_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            codec_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string codec = 3;</code>
       * @return The bytes for codec.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getCodecBytes() {
        java.lang.Object ref = codec_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          codec_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string codec = 3;</code>
       * @param value The codec to set.
       * @return This builder for chaining.
       */
      public Builder setCodec(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        codec_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional string codec = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearCodec() {
        codec_ = getDefaultInstance().getCodec();
        bitField0_ = (bitField0_ & ~0x00000004);
        onChanged();
        return this;
      }
      /**
       * <code>optional string codec = 3;</code>
       * @param value The bytes for codec to set.
       * @return This builder for chaining.
       */
      public Builder setCodecBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        codec_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }

      private java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section> sections_ =
        java.util.Collections.emptyList();
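      // Copy-on-write guard: bit 0x00000008 records whether sections_ is already a
      // private mutable copy owned by this builder; until then it may still alias
      // the list of the message it was merged from.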
      private void ensureSectionsIsMutable() {
        if (!((bitField0_ & 0x00000008) != 0)) {
          sections_ = new java.util.ArrayList<org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section>(sections_);
          bitField0_ |= 0x00000008;
         }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder> sectionsBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section> getSectionsList() {
        if (sectionsBuilder_ == null) {
          return java.util.Collections.unmodifiableList(sections_);
        } else {
          return sectionsBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
       */
      public int getSectionsCount() {
        if (sectionsBuilder_ == null) {
          return sections_.size();
        } else {
          return sectionsBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
       */
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section getSections(int index) {
        if (sectionsBuilder_ == null) {
          return sections_.get(index);
        } else {
          return sectionsBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
       */
      public Builder setSections(
          int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section value) {
        if (sectionsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureSectionsIsMutable();
          sections_.set(index, value);
          onChanged();
        } else {
          sectionsBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
       */
      public Builder setSections(
          int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder builderForValue) {
        if (sectionsBuilder_ == null) {
          ensureSectionsIsMutable();
          sections_.set(index, builderForValue.build());
          onChanged();
        } else {
          sectionsBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
       */
      public Builder addSections(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section value) {
        if (sectionsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureSectionsIsMutable();
          sections_.add(value);
          onChanged();
        } else {
          sectionsBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
       */
      public Builder addSections(
          int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section value) {
        if (sectionsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureSectionsIsMutable();
          sections_.add(index, value);
          onChanged();
        } else {
          sectionsBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
       */
      public Builder addSections(
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder builderForValue) {
        if (sectionsBuilder_ == null) {
          ensureSectionsIsMutable();
          sections_.add(builderForValue.build());
          onChanged();
        } else {
          sectionsBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
       */
      public Builder addSections(
          int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder builderForValue) {
        if (sectionsBuilder_ == null) {
          ensureSectionsIsMutable();
          sections_.add(index, builderForValue.build());
          onChanged();
        } else {
          sectionsBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
       */
      public Builder addAllSections(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section> values) {
        if (sectionsBuilder_ == null) {
          ensureSectionsIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, sections_);
          onChanged();
        } else {
          sectionsBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
       */
      public Builder clearSections() {
        if (sectionsBuilder_ == null) {
          sections_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000008);
          onChanged();
        } else {
          sectionsBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
       */
      public Builder removeSections(int index) {
        if (sectionsBuilder_ == null) {
          ensureSectionsIsMutable();
          sections_.remove(index);
          onChanged();
        } else {
          sectionsBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
       */
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder getSectionsBuilder(
          int index) {
        return getSectionsFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
       */
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder getSectionsOrBuilder(
          int index) {
        if (sectionsBuilder_ == null) {
          return sections_.get(index);
        } else {
          return sectionsBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder> 
           getSectionsOrBuilderList() {
        if (sectionsBuilder_ != null) {
          return sectionsBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(sections_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
       */
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder addSectionsBuilder() {
        return getSectionsFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
       */
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder addSectionsBuilder(
          int index) {
        return getSectionsFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.FileSummary.Section sections = 4;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder> 
           getSectionsBuilderList() {
        return getSectionsFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder> 
          getSectionsFieldBuilder() {
        if (sectionsBuilder_ == null) {
          sectionsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.Section.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary.SectionOrBuilder>(
                  sections_,
                  ((bitField0_ & 0x00000008) != 0),
                  getParentForChildren(),
                  isClean());
          sections_ = null;
        }
        return sectionsBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.FileSummary)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.FileSummary)
    private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary();
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<FileSummary>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<FileSummary>() {
      @java.lang.Override
      public FileSummary parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<FileSummary> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<FileSummary> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
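
  /*
   * Illustrative sketch (not part of the generated code): building, serializing and
   * re-parsing a FileSummary with the builder and parser defined above. Real callers
   * would populate each Section rather than adding the default instance, and build()
   * throws if the required ondiskVersion or layoutVersion field is left unset.
   *
   *   FsImageProto.FileSummary summary = FsImageProto.FileSummary.newBuilder()
   *       .setOndiskVersion(1)            // arbitrary example values
   *       .setLayoutVersion(2)
   *       .addSections(FsImageProto.FileSummary.Section.getDefaultInstance())
   *       .build();
   *   byte[] bytes = summary.toByteArray();
   *   FsImageProto.FileSummary parsed =
   *       FsImageProto.FileSummary.parser().parseFrom(bytes);
   *   int sections = parsed.getSectionsCount();
   */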

  public interface NameSystemSectionOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.NameSystemSection)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>optional uint32 namespaceId = 1;</code>
     * @return Whether the namespaceId field is set.
     */
    boolean hasNamespaceId();
    /**
     * <code>optional uint32 namespaceId = 1;</code>
     * @return The namespaceId.
     */
    int getNamespaceId();

    /**
     * <pre>
     * legacy generation stamp
     * </pre>
     *
     * <code>optional uint64 genstampV1 = 2;</code>
     * @return Whether the genstampV1 field is set.
     */
    boolean hasGenstampV1();
    /**
     * <pre>
     * legacy generation stamp
     * </pre>
     *
     * <code>optional uint64 genstampV1 = 2;</code>
     * @return The genstampV1.
     */
    long getGenstampV1();

    /**
     * <pre>
     * generation stamp of latest version
     * </pre>
     *
     * <code>optional uint64 genstampV2 = 3;</code>
     * @return Whether the genstampV2 field is set.
     */
    boolean hasGenstampV2();
    /**
     * <pre>
     * generation stamp of latest version
     * </pre>
     *
     * <code>optional uint64 genstampV2 = 3;</code>
     * @return The genstampV2.
     */
    long getGenstampV2();

    /**
     * <code>optional uint64 genstampV1Limit = 4;</code>
     * @return Whether the genstampV1Limit field is set.
     */
    boolean hasGenstampV1Limit();
    /**
     * <code>optional uint64 genstampV1Limit = 4;</code>
     * @return The genstampV1Limit.
     */
    long getGenstampV1Limit();

    /**
     * <code>optional uint64 lastAllocatedBlockId = 5;</code>
     * @return Whether the lastAllocatedBlockId field is set.
     */
    boolean hasLastAllocatedBlockId();
    /**
     * <code>optional uint64 lastAllocatedBlockId = 5;</code>
     * @return The lastAllocatedBlockId.
     */
    long getLastAllocatedBlockId();

    /**
     * <code>optional uint64 transactionId = 6;</code>
     * @return Whether the transactionId field is set.
     */
    boolean hasTransactionId();
    /**
     * <code>optional uint64 transactionId = 6;</code>
     * @return The transactionId.
     */
    long getTransactionId();

    /**
     * <code>optional uint64 rollingUpgradeStartTime = 7;</code>
     * @return Whether the rollingUpgradeStartTime field is set.
     */
    boolean hasRollingUpgradeStartTime();
    /**
     * <code>optional uint64 rollingUpgradeStartTime = 7;</code>
     * @return The rollingUpgradeStartTime.
     */
    long getRollingUpgradeStartTime();

    /**
     * <code>optional uint64 lastAllocatedStripedBlockId = 8;</code>
     * @return Whether the lastAllocatedStripedBlockId field is set.
     */
    boolean hasLastAllocatedStripedBlockId();
    /**
     * <code>optional uint64 lastAllocatedStripedBlockId = 8;</code>
     * @return The lastAllocatedStripedBlockId.
     */
    long getLastAllocatedStripedBlockId();
  }
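  /*
   * Illustrative sketch (not part of the generated code): every NameSystemSection
   * field is optional, so a builder may set any subset and build() always succeeds;
   * readers should consult the hasXxx() accessors before trusting getXxx().
   *
   *   FsImageProto.NameSystemSection ns = FsImageProto.NameSystemSection.newBuilder()
   *       .setNamespaceId(12345)          // arbitrary example values
   *       .setGenstampV2(1000L)
   *       .setTransactionId(42L)
   *       .build();
   *   long txid = ns.hasTransactionId() ? ns.getTransactionId() : 0L;
   */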
  /**
   * <pre>
   **
   * Name: NS_INFO
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.fsimage.NameSystemSection}
   */
  public static final class NameSystemSection extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.NameSystemSection)
      NameSystemSectionOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use NameSystemSection.newBuilder() to construct.
    private NameSystemSection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private NameSystemSection() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new NameSystemSection();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_NameSystemSection_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_NameSystemSection_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection.Builder.class);
    }

    private int bitField0_;
    public static final int NAMESPACEID_FIELD_NUMBER = 1;
    private int namespaceId_ = 0;
    /**
     * <code>optional uint32 namespaceId = 1;</code>
     * @return Whether the namespaceId field is set.
     */
    @java.lang.Override
    public boolean hasNamespaceId() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional uint32 namespaceId = 1;</code>
     * @return The namespaceId.
     */
    @java.lang.Override
    public int getNamespaceId() {
      return namespaceId_;
    }

    public static final int GENSTAMPV1_FIELD_NUMBER = 2;
    private long genstampV1_ = 0L;
    /**
     * <pre>
     * legacy generation stamp
     * </pre>
     *
     * <code>optional uint64 genstampV1 = 2;</code>
     * @return Whether the genstampV1 field is set.
     */
    @java.lang.Override
    public boolean hasGenstampV1() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <pre>
     * legacy generation stamp
     * </pre>
     *
     * <code>optional uint64 genstampV1 = 2;</code>
     * @return The genstampV1.
     */
    @java.lang.Override
    public long getGenstampV1() {
      return genstampV1_;
    }

    public static final int GENSTAMPV2_FIELD_NUMBER = 3;
    private long genstampV2_ = 0L;
    /**
     * <pre>
     * generation stamp of latest version
     * </pre>
     *
     * <code>optional uint64 genstampV2 = 3;</code>
     * @return Whether the genstampV2 field is set.
     */
    @java.lang.Override
    public boolean hasGenstampV2() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <pre>
     * generation stamp of latest version
     * </pre>
     *
     * <code>optional uint64 genstampV2 = 3;</code>
     * @return The genstampV2.
     */
    @java.lang.Override
    public long getGenstampV2() {
      return genstampV2_;
    }

    public static final int GENSTAMPV1LIMIT_FIELD_NUMBER = 4;
    private long genstampV1Limit_ = 0L;
    /**
     * <code>optional uint64 genstampV1Limit = 4;</code>
     * @return Whether the genstampV1Limit field is set.
     */
    @java.lang.Override
    public boolean hasGenstampV1Limit() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>optional uint64 genstampV1Limit = 4;</code>
     * @return The genstampV1Limit.
     */
    @java.lang.Override
    public long getGenstampV1Limit() {
      return genstampV1Limit_;
    }

    public static final int LASTALLOCATEDBLOCKID_FIELD_NUMBER = 5;
    private long lastAllocatedBlockId_ = 0L;
    /**
     * <code>optional uint64 lastAllocatedBlockId = 5;</code>
     * @return Whether the lastAllocatedBlockId field is set.
     */
    @java.lang.Override
    public boolean hasLastAllocatedBlockId() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>optional uint64 lastAllocatedBlockId = 5;</code>
     * @return The lastAllocatedBlockId.
     */
    @java.lang.Override
    public long getLastAllocatedBlockId() {
      return lastAllocatedBlockId_;
    }

    public static final int TRANSACTIONID_FIELD_NUMBER = 6;
    private long transactionId_ = 0L;
    /**
     * <code>optional uint64 transactionId = 6;</code>
     * @return Whether the transactionId field is set.
     */
    @java.lang.Override
    public boolean hasTransactionId() {
      return ((bitField0_ & 0x00000020) != 0);
    }
    /**
     * <code>optional uint64 transactionId = 6;</code>
     * @return The transactionId.
     */
    @java.lang.Override
    public long getTransactionId() {
      return transactionId_;
    }

    public static final int ROLLINGUPGRADESTARTTIME_FIELD_NUMBER = 7;
    private long rollingUpgradeStartTime_ = 0L;
    /**
     * <code>optional uint64 rollingUpgradeStartTime = 7;</code>
     * @return Whether the rollingUpgradeStartTime field is set.
     */
    @java.lang.Override
    public boolean hasRollingUpgradeStartTime() {
      return ((bitField0_ & 0x00000040) != 0);
    }
    /**
     * <code>optional uint64 rollingUpgradeStartTime = 7;</code>
     * @return The rollingUpgradeStartTime.
     */
    @java.lang.Override
    public long getRollingUpgradeStartTime() {
      return rollingUpgradeStartTime_;
    }

    public static final int LASTALLOCATEDSTRIPEDBLOCKID_FIELD_NUMBER = 8;
    private long lastAllocatedStripedBlockId_ = 0L;
    /**
     * <code>optional uint64 lastAllocatedStripedBlockId = 8;</code>
     * @return Whether the lastAllocatedStripedBlockId field is set.
     */
    @java.lang.Override
    public boolean hasLastAllocatedStripedBlockId() {
      return ((bitField0_ & 0x00000080) != 0);
    }
    /**
     * <code>optional uint64 lastAllocatedStripedBlockId = 8;</code>
     * @return The lastAllocatedStripedBlockId.
     */
    @java.lang.Override
    public long getLastAllocatedStripedBlockId() {
      return lastAllocatedStripedBlockId_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt32(1, namespaceId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(2, genstampV1_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeUInt64(3, genstampV2_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeUInt64(4, genstampV1Limit_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeUInt64(5, lastAllocatedBlockId_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        output.writeUInt64(6, transactionId_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        output.writeUInt64(7, rollingUpgradeStartTime_);
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        output.writeUInt64(8, lastAllocatedStripedBlockId_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(1, namespaceId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(2, genstampV1_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(3, genstampV2_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(4, genstampV1Limit_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(5, lastAllocatedBlockId_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(6, transactionId_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(7, rollingUpgradeStartTime_);
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(8, lastAllocatedStripedBlockId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection) obj;

      if (hasNamespaceId() != other.hasNamespaceId()) return false;
      if (hasNamespaceId()) {
        if (getNamespaceId()
            != other.getNamespaceId()) return false;
      }
      if (hasGenstampV1() != other.hasGenstampV1()) return false;
      if (hasGenstampV1()) {
        if (getGenstampV1()
            != other.getGenstampV1()) return false;
      }
      if (hasGenstampV2() != other.hasGenstampV2()) return false;
      if (hasGenstampV2()) {
        if (getGenstampV2()
            != other.getGenstampV2()) return false;
      }
      if (hasGenstampV1Limit() != other.hasGenstampV1Limit()) return false;
      if (hasGenstampV1Limit()) {
        if (getGenstampV1Limit()
            != other.getGenstampV1Limit()) return false;
      }
      if (hasLastAllocatedBlockId() != other.hasLastAllocatedBlockId()) return false;
      if (hasLastAllocatedBlockId()) {
        if (getLastAllocatedBlockId()
            != other.getLastAllocatedBlockId()) return false;
      }
      if (hasTransactionId() != other.hasTransactionId()) return false;
      if (hasTransactionId()) {
        if (getTransactionId()
            != other.getTransactionId()) return false;
      }
      if (hasRollingUpgradeStartTime() != other.hasRollingUpgradeStartTime()) return false;
      if (hasRollingUpgradeStartTime()) {
        if (getRollingUpgradeStartTime()
            != other.getRollingUpgradeStartTime()) return false;
      }
      if (hasLastAllocatedStripedBlockId() != other.hasLastAllocatedStripedBlockId()) return false;
      if (hasLastAllocatedStripedBlockId()) {
        if (getLastAllocatedStripedBlockId()
            != other.getLastAllocatedStripedBlockId()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasNamespaceId()) {
        hash = (37 * hash) + NAMESPACEID_FIELD_NUMBER;
        hash = (53 * hash) + getNamespaceId();
      }
      if (hasGenstampV1()) {
        hash = (37 * hash) + GENSTAMPV1_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getGenstampV1());
      }
      if (hasGenstampV2()) {
        hash = (37 * hash) + GENSTAMPV2_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getGenstampV2());
      }
      if (hasGenstampV1Limit()) {
        hash = (37 * hash) + GENSTAMPV1LIMIT_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getGenstampV1Limit());
      }
      if (hasLastAllocatedBlockId()) {
        hash = (37 * hash) + LASTALLOCATEDBLOCKID_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getLastAllocatedBlockId());
      }
      if (hasTransactionId()) {
        hash = (37 * hash) + TRANSACTIONID_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getTransactionId());
      }
      if (hasRollingUpgradeStartTime()) {
        hash = (37 * hash) + ROLLINGUPGRADESTARTTIME_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getRollingUpgradeStartTime());
      }
      if (hasLastAllocatedStripedBlockId()) {
        hash = (37 * hash) + LASTALLOCATEDSTRIPEDBLOCKID_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getLastAllocatedStripedBlockId());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Name: NS_INFO
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.fsimage.NameSystemSection}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.NameSystemSection)
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSectionOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_NameSystemSection_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_NameSystemSection_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        namespaceId_ = 0;
        genstampV1_ = 0L;
        genstampV2_ = 0L;
        genstampV1Limit_ = 0L;
        lastAllocatedBlockId_ = 0L;
        transactionId_ = 0L;
        rollingUpgradeStartTime_ = 0L;
        lastAllocatedStripedBlockId_ = 0L;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_NameSystemSection_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection build() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection buildPartial() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.namespaceId_ = namespaceId_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.genstampV1_ = genstampV1_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.genstampV2_ = genstampV2_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.genstampV1Limit_ = genstampV1Limit_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.lastAllocatedBlockId_ = lastAllocatedBlockId_;
          to_bitField0_ |= 0x00000010;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.transactionId_ = transactionId_;
          to_bitField0_ |= 0x00000020;
        }
        if (((from_bitField0_ & 0x00000040) != 0)) {
          result.rollingUpgradeStartTime_ = rollingUpgradeStartTime_;
          to_bitField0_ |= 0x00000040;
        }
        if (((from_bitField0_ & 0x00000080) != 0)) {
          result.lastAllocatedStripedBlockId_ = lastAllocatedStripedBlockId_;
          to_bitField0_ |= 0x00000080;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection) {
          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection other) {
        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection.getDefaultInstance()) return this;
        if (other.hasNamespaceId()) {
          setNamespaceId(other.getNamespaceId());
        }
        if (other.hasGenstampV1()) {
          setGenstampV1(other.getGenstampV1());
        }
        if (other.hasGenstampV2()) {
          setGenstampV2(other.getGenstampV2());
        }
        if (other.hasGenstampV1Limit()) {
          setGenstampV1Limit(other.getGenstampV1Limit());
        }
        if (other.hasLastAllocatedBlockId()) {
          setLastAllocatedBlockId(other.getLastAllocatedBlockId());
        }
        if (other.hasTransactionId()) {
          setTransactionId(other.getTransactionId());
        }
        if (other.hasRollingUpgradeStartTime()) {
          setRollingUpgradeStartTime(other.getRollingUpgradeStartTime());
        }
        if (other.hasLastAllocatedStripedBlockId()) {
          setLastAllocatedStripedBlockId(other.getLastAllocatedStripedBlockId());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                namespaceId_ = input.readUInt32();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 16: {
                genstampV1_ = input.readUInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                genstampV2_ = input.readUInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 32: {
                genstampV1Limit_ = input.readUInt64();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
              case 40: {
                lastAllocatedBlockId_ = input.readUInt64();
                bitField0_ |= 0x00000010;
                break;
              } // case 40
              case 48: {
                transactionId_ = input.readUInt64();
                bitField0_ |= 0x00000020;
                break;
              } // case 48
              case 56: {
                rollingUpgradeStartTime_ = input.readUInt64();
                bitField0_ |= 0x00000040;
                break;
              } // case 56
              case 64: {
                lastAllocatedStripedBlockId_ = input.readUInt64();
                bitField0_ |= 0x00000080;
                break;
              } // case 64
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int namespaceId_ ;
      /**
       * <code>optional uint32 namespaceId = 1;</code>
       * @return Whether the namespaceId field is set.
       */
      @java.lang.Override
      public boolean hasNamespaceId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional uint32 namespaceId = 1;</code>
       * @return The namespaceId.
       */
      @java.lang.Override
      public int getNamespaceId() {
        return namespaceId_;
      }
      /**
       * <code>optional uint32 namespaceId = 1;</code>
       * @param value The namespaceId to set.
       * @return This builder for chaining.
       */
      public Builder setNamespaceId(int value) {

        namespaceId_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint32 namespaceId = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearNamespaceId() {
        bitField0_ = (bitField0_ & ~0x00000001);
        namespaceId_ = 0;
        onChanged();
        return this;
      }

      private long genstampV1_ ;
      /**
       * <pre>
       * legacy generation stamp
       * </pre>
       *
       * <code>optional uint64 genstampV1 = 2;</code>
       * @return Whether the genstampV1 field is set.
       */
      @java.lang.Override
      public boolean hasGenstampV1() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       * legacy generation stamp
       * </pre>
       *
       * <code>optional uint64 genstampV1 = 2;</code>
       * @return The genstampV1.
       */
      @java.lang.Override
      public long getGenstampV1() {
        return genstampV1_;
      }
      /**
       * <pre>
       * legacy generation stamp
       * </pre>
       *
       * <code>optional uint64 genstampV1 = 2;</code>
       * @param value The genstampV1 to set.
       * @return This builder for chaining.
       */
      public Builder setGenstampV1(long value) {

        genstampV1_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * legacy generation stamp
       * </pre>
       *
       * <code>optional uint64 genstampV1 = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearGenstampV1() {
        bitField0_ = (bitField0_ & ~0x00000002);
        genstampV1_ = 0L;
        onChanged();
        return this;
      }

      private long genstampV2_ ;
      /**
       * <pre>
       * generation stamp of latest version
       * </pre>
       *
       * <code>optional uint64 genstampV2 = 3;</code>
       * @return Whether the genstampV2 field is set.
       */
      @java.lang.Override
      public boolean hasGenstampV2() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <pre>
       * generation stamp of latest version
       * </pre>
       *
       * <code>optional uint64 genstampV2 = 3;</code>
       * @return The genstampV2.
       */
      @java.lang.Override
      public long getGenstampV2() {
        return genstampV2_;
      }
      /**
       * <pre>
       * generation stamp of latest version
       * </pre>
       *
       * <code>optional uint64 genstampV2 = 3;</code>
       * @param value The genstampV2 to set.
       * @return This builder for chaining.
       */
      public Builder setGenstampV2(long value) {

        genstampV2_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * generation stamp of latest version
       * </pre>
       *
       * <code>optional uint64 genstampV2 = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearGenstampV2() {
        bitField0_ = (bitField0_ & ~0x00000004);
        genstampV2_ = 0L;
        onChanged();
        return this;
      }

      private long genstampV1Limit_ ;
      /**
       * <code>optional uint64 genstampV1Limit = 4;</code>
       * @return Whether the genstampV1Limit field is set.
       */
      @java.lang.Override
      public boolean hasGenstampV1Limit() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional uint64 genstampV1Limit = 4;</code>
       * @return The genstampV1Limit.
       */
      @java.lang.Override
      public long getGenstampV1Limit() {
        return genstampV1Limit_;
      }
      /**
       * <code>optional uint64 genstampV1Limit = 4;</code>
       * @param value The genstampV1Limit to set.
       * @return This builder for chaining.
       */
      public Builder setGenstampV1Limit(long value) {

        genstampV1Limit_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 genstampV1Limit = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearGenstampV1Limit() {
        bitField0_ = (bitField0_ & ~0x00000008);
        genstampV1Limit_ = 0L;
        onChanged();
        return this;
      }

      private long lastAllocatedBlockId_ ;
      /**
       * <code>optional uint64 lastAllocatedBlockId = 5;</code>
       * @return Whether the lastAllocatedBlockId field is set.
       */
      @java.lang.Override
      public boolean hasLastAllocatedBlockId() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional uint64 lastAllocatedBlockId = 5;</code>
       * @return The lastAllocatedBlockId.
       */
      @java.lang.Override
      public long getLastAllocatedBlockId() {
        return lastAllocatedBlockId_;
      }
      /**
       * <code>optional uint64 lastAllocatedBlockId = 5;</code>
       * @param value The lastAllocatedBlockId to set.
       * @return This builder for chaining.
       */
      public Builder setLastAllocatedBlockId(long value) {

        lastAllocatedBlockId_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 lastAllocatedBlockId = 5;</code>
       * @return This builder for chaining.
       */
      public Builder clearLastAllocatedBlockId() {
        bitField0_ = (bitField0_ & ~0x00000010);
        lastAllocatedBlockId_ = 0L;
        onChanged();
        return this;
      }

      private long transactionId_ ;
      /**
       * <code>optional uint64 transactionId = 6;</code>
       * @return Whether the transactionId field is set.
       */
      @java.lang.Override
      public boolean hasTransactionId() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>optional uint64 transactionId = 6;</code>
       * @return The transactionId.
       */
      @java.lang.Override
      public long getTransactionId() {
        return transactionId_;
      }
      /**
       * <code>optional uint64 transactionId = 6;</code>
       * @param value The transactionId to set.
       * @return This builder for chaining.
       */
      public Builder setTransactionId(long value) {

        transactionId_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 transactionId = 6;</code>
       * @return This builder for chaining.
       */
      public Builder clearTransactionId() {
        bitField0_ = (bitField0_ & ~0x00000020);
        transactionId_ = 0L;
        onChanged();
        return this;
      }

      private long rollingUpgradeStartTime_ ;
      /**
       * <code>optional uint64 rollingUpgradeStartTime = 7;</code>
       * @return Whether the rollingUpgradeStartTime field is set.
       */
      @java.lang.Override
      public boolean hasRollingUpgradeStartTime() {
        return ((bitField0_ & 0x00000040) != 0);
      }
      /**
       * <code>optional uint64 rollingUpgradeStartTime = 7;</code>
       * @return The rollingUpgradeStartTime.
       */
      @java.lang.Override
      public long getRollingUpgradeStartTime() {
        return rollingUpgradeStartTime_;
      }
      /**
       * <code>optional uint64 rollingUpgradeStartTime = 7;</code>
       * @param value The rollingUpgradeStartTime to set.
       * @return This builder for chaining.
       */
      public Builder setRollingUpgradeStartTime(long value) {

        rollingUpgradeStartTime_ = value;
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 rollingUpgradeStartTime = 7;</code>
       * @return This builder for chaining.
       */
      public Builder clearRollingUpgradeStartTime() {
        bitField0_ = (bitField0_ & ~0x00000040);
        rollingUpgradeStartTime_ = 0L;
        onChanged();
        return this;
      }

      private long lastAllocatedStripedBlockId_ ;
      /**
       * <code>optional uint64 lastAllocatedStripedBlockId = 8;</code>
       * @return Whether the lastAllocatedStripedBlockId field is set.
       */
      @java.lang.Override
      public boolean hasLastAllocatedStripedBlockId() {
        return ((bitField0_ & 0x00000080) != 0);
      }
      /**
       * <code>optional uint64 lastAllocatedStripedBlockId = 8;</code>
       * @return The lastAllocatedStripedBlockId.
       */
      @java.lang.Override
      public long getLastAllocatedStripedBlockId() {
        return lastAllocatedStripedBlockId_;
      }
      /**
       * <code>optional uint64 lastAllocatedStripedBlockId = 8;</code>
       * @param value The lastAllocatedStripedBlockId to set.
       * @return This builder for chaining.
       */
      public Builder setLastAllocatedStripedBlockId(long value) {

        lastAllocatedStripedBlockId_ = value;
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 lastAllocatedStripedBlockId = 8;</code>
       * @return This builder for chaining.
       */
      public Builder clearLastAllocatedStripedBlockId() {
        bitField0_ = (bitField0_ & ~0x00000080);
        lastAllocatedStripedBlockId_ = 0L;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.NameSystemSection)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.NameSystemSection)
    private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection();
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<NameSystemSection>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<NameSystemSection>() {
      @java.lang.Override
      public NameSystemSection parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
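        // Parse into a fresh builder; each catch below attaches the partially
        // built message so callers can inspect whatever was read before the failure.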
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<NameSystemSection> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<NameSystemSection> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
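
  // Illustrative sketch, not generated by protoc: populating the NameSystemSection
  // builder whose setters appear above. All values are placeholders, and every
  // field named here is optional per the Javadocs above, so build() succeeds
  // without the remaining fields.
  private static NameSystemSection buildNameSystemSectionExample() {
    return NameSystemSection.newBuilder()
        .setGenstampV1(1000L)                  // legacy generation stamp
        .setGenstampV2(1001L)                  // generation stamp of latest version
        .setGenstampV1Limit(0L)
        .setLastAllocatedBlockId(1073741825L)  // placeholder block id
        .setTransactionId(42L)
        .setRollingUpgradeStartTime(0L)
        .setLastAllocatedStripedBlockId(0L)
        .build();
  }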

  public interface INodeSectionOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>optional uint64 lastInodeId = 1;</code>
     * @return Whether the lastInodeId field is set.
     */
    boolean hasLastInodeId();
    /**
     * <code>optional uint64 lastInodeId = 1;</code>
     * @return The lastInodeId.
     */
    long getLastInodeId();

    /**
     * <pre>
     * The INode entries themselves follow this message, serialized as repeated INode records.
     * </pre>
     *
     * <code>optional uint64 numInodes = 2;</code>
     * @return Whether the numInodes field is set.
     */
    boolean hasNumInodes();
    /**
     * <pre>
     * The INode entries themselves follow this message, serialized as repeated INode records.
     * </pre>
     *
     * <code>optional uint64 numInodes = 2;</code>
     * @return The numInodes.
     */
    long getNumInodes();
  }
  /**
   * <pre>
   **
   * Permission is serialized as a 64-bit long. [0:24):[24:48):[48:64) (in Big Endian).
   * The first and the second parts are the string ids of the user and
   * group name, and the last 16 bits are the permission bits.
   *
   * Name: INODE
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection}
   */
  public static final class INodeSection extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection)
      INodeSectionOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use INodeSection.newBuilder() to construct.
    private INodeSection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private INodeSection() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new INodeSection();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.Builder.class);
    }
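
    // Illustrative sketch, not generated by protoc: unpacking the 64-bit
    // permission long described in the class Javadoc above. With bit 0 as the
    // most significant bit, [0:24) is the user string id, [24:48) the group
    // string id, and [48:64) the 16 permission bits. Names here are
    // hypothetical and not part of the generated API.
    private static void decodePermissionExample(long permission) {
      int userStringId  = (int) ((permission >>> 40) & 0xFFFFFF); // bits [0:24)
      int groupStringId = (int) ((permission >>> 16) & 0xFFFFFF); // bits [24:48)
      int permBits      = (int) (permission & 0xFFFF);            // bits [48:64)
      System.out.println("user=" + userStringId + " group=" + groupStringId
          + " mode=0" + Integer.toOctalString(permBits));
    }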

    public interface FileUnderConstructionFeatureOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <code>optional string clientName = 1;</code>
       * @return Whether the clientName field is set.
       */
      boolean hasClientName();
      /**
       * <code>optional string clientName = 1;</code>
       * @return The clientName.
       */
      java.lang.String getClientName();
      /**
       * <code>optional string clientName = 1;</code>
       * @return The bytes for clientName.
       */
      org.apache.hadoop.thirdparty.protobuf.ByteString
          getClientNameBytes();

      /**
       * <code>optional string clientMachine = 2;</code>
       * @return Whether the clientMachine field is set.
       */
      boolean hasClientMachine();
      /**
       * <code>optional string clientMachine = 2;</code>
       * @return The clientMachine.
       */
      java.lang.String getClientMachine();
      /**
       * <code>optional string clientMachine = 2;</code>
       * @return The bytes for clientMachine.
       */
      org.apache.hadoop.thirdparty.protobuf.ByteString
          getClientMachineBytes();
    }
    /**
     * <pre>
     **
     * under-construction feature for INodeFile
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature}
     */
    public static final class FileUnderConstructionFeature extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature)
        FileUnderConstructionFeatureOrBuilder {
    private static final long serialVersionUID = 0L;
      // Use FileUnderConstructionFeature.newBuilder() to construct.
      private FileUnderConstructionFeature(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private FileUnderConstructionFeature() {
        clientName_ = "";
        clientMachine_ = "";
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new FileUnderConstructionFeature();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder.class);
      }

      private int bitField0_;
      public static final int CLIENTNAME_FIELD_NUMBER = 1;
      @SuppressWarnings("serial")
      private volatile java.lang.Object clientName_ = "";
      /**
       * <code>optional string clientName = 1;</code>
       * @return Whether the clientName field is set.
       */
      @java.lang.Override
      public boolean hasClientName() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional string clientName = 1;</code>
       * @return The clientName.
       */
      @java.lang.Override
      public java.lang.String getClientName() {
        java.lang.Object ref = clientName_;
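        // clientName_ may hold either a String or a ByteString; decode lazily
        // and cache the String form only when the bytes are valid UTF-8.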
        if (ref instanceof java.lang.String) {
          return (java.lang.String) ref;
        } else {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            clientName_ = s;
          }
          return s;
        }
      }
      /**
       * <code>optional string clientName = 1;</code>
       * @return The bytes for clientName.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getClientNameBytes() {
        java.lang.Object ref = clientName_;
        if (ref instanceof java.lang.String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          clientName_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }

      public static final int CLIENTMACHINE_FIELD_NUMBER = 2;
      @SuppressWarnings("serial")
      private volatile java.lang.Object clientMachine_ = "";
      /**
       * <code>optional string clientMachine = 2;</code>
       * @return Whether the clientMachine field is set.
       */
      @java.lang.Override
      public boolean hasClientMachine() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional string clientMachine = 2;</code>
       * @return The clientMachine.
       */
      @java.lang.Override
      public java.lang.String getClientMachine() {
        java.lang.Object ref = clientMachine_;
        if (ref instanceof java.lang.String) {
          return (java.lang.String) ref;
        } else {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            clientMachine_ = s;
          }
          return s;
        }
      }
      /**
       * <code>optional string clientMachine = 2;</code>
       * @return The bytes for clientMachine.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getClientMachineBytes() {
        java.lang.Object ref = clientMachine_;
        if (ref instanceof java.lang.String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          clientMachine_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        memoizedIsInitialized = 1;
        return true;
      }

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        if (((bitField0_ & 0x00000001) != 0)) {
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, clientName_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, clientMachine_);
        }
        getUnknownFields().writeTo(output);
      }

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, clientName_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, clientMachine_);
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
          return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature) obj;

        if (hasClientName() != other.hasClientName()) return false;
        if (hasClientName()) {
          if (!getClientName()
              .equals(other.getClientName())) return false;
        }
        if (hasClientMachine() != other.hasClientMachine()) return false;
        if (hasClientMachine()) {
          if (!getClientMachine()
              .equals(other.getClientMachine())) return false;
        }
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (hasClientName()) {
          hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER;
          hash = (53 * hash) + getClientName().hashCode();
        }
        if (hasClientMachine()) {
          hash = (37 * hash) + CLIENTMACHINE_FIELD_NUMBER;
          hash = (53 * hash) + getClientMachine().hashCode();
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * <pre>
       **
       * under-construction feature for INodeFile
       * </pre>
       *
       * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeatureOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.newBuilder()
        private Builder() {

        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);

        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          clientName_ = "";
          clientMachine_ = "";
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature(this);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature result) {
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
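          // Copy only the fields whose builder has-bits are set, and record the
          // corresponding presence bits on the message being built.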
          if (((from_bitField0_ & 0x00000001) != 0)) {
            result.clientName_ = clientName_;
            to_bitField0_ |= 0x00000001;
          }
          if (((from_bitField0_ & 0x00000002) != 0)) {
            result.clientMachine_ = clientMachine_;
            to_bitField0_ |= 0x00000002;
          }
          result.bitField0_ |= to_bitField0_;
        }

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.getDefaultInstance()) return this;
          if (other.hasClientName()) {
            clientName_ = other.clientName_;
            bitField0_ |= 0x00000001;
            onChanged();
          }
          if (other.hasClientMachine()) {
            clientMachine_ = other.clientMachine_;
            bitField0_ |= 0x00000002;
            onChanged();
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }

        @java.lang.Override
        public final boolean isInitialized() {
          return true;
        }

        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
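              // A tag encodes (field number << 3) | wire type; readTag() returns 0
              // at end of input, which the switch below treats as "done".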
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 10: {
                  clientName_ = input.readBytes();
                  bitField0_ |= 0x00000001;
                  break;
                } // case 10
                case 18: {
                  clientMachine_ = input.readBytes();
                  bitField0_ |= 0x00000002;
                  break;
                } // case 18
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
        private int bitField0_;

        private java.lang.Object clientName_ = "";
        /**
         * <code>optional string clientName = 1;</code>
         * @return Whether the clientName field is set.
         */
        public boolean hasClientName() {
          return ((bitField0_ & 0x00000001) != 0);
        }
        /**
         * <code>optional string clientName = 1;</code>
         * @return The clientName.
         */
        public java.lang.String getClientName() {
          java.lang.Object ref = clientName_;
          if (!(ref instanceof java.lang.String)) {
            org.apache.hadoop.thirdparty.protobuf.ByteString bs =
                (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
            java.lang.String s = bs.toStringUtf8();
            if (bs.isValidUtf8()) {
              clientName_ = s;
            }
            return s;
          } else {
            return (java.lang.String) ref;
          }
        }
        /**
         * <code>optional string clientName = 1;</code>
         * @return The bytes for clientName.
         */
        public org.apache.hadoop.thirdparty.protobuf.ByteString
            getClientNameBytes() {
          java.lang.Object ref = clientName_;
          if (ref instanceof String) {
            org.apache.hadoop.thirdparty.protobuf.ByteString b = 
                org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                    (java.lang.String) ref);
            clientName_ = b;
            return b;
          } else {
            return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          }
        }
        /**
         * <code>optional string clientName = 1;</code>
         * @param value The clientName to set.
         * @return This builder for chaining.
         */
        public Builder setClientName(
            java.lang.String value) {
          if (value == null) { throw new NullPointerException(); }
          clientName_ = value;
          bitField0_ |= 0x00000001;
          onChanged();
          return this;
        }
        /**
         * <code>optional string clientName = 1;</code>
         * @return This builder for chaining.
         */
        public Builder clearClientName() {
          clientName_ = getDefaultInstance().getClientName();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
          return this;
        }
        /**
         * <code>optional string clientName = 1;</code>
         * @param value The bytes for clientName to set.
         * @return This builder for chaining.
         */
        public Builder setClientNameBytes(
            org.apache.hadoop.thirdparty.protobuf.ByteString value) {
          if (value == null) { throw new NullPointerException(); }
          clientName_ = value;
          bitField0_ |= 0x00000001;
          onChanged();
          return this;
        }

        private java.lang.Object clientMachine_ = "";
        /**
         * <code>optional string clientMachine = 2;</code>
         * @return Whether the clientMachine field is set.
         */
        public boolean hasClientMachine() {
          return ((bitField0_ & 0x00000002) != 0);
        }
        /**
         * <code>optional string clientMachine = 2;</code>
         * @return The clientMachine.
         */
        public java.lang.String getClientMachine() {
          java.lang.Object ref = clientMachine_;
          if (!(ref instanceof java.lang.String)) {
            org.apache.hadoop.thirdparty.protobuf.ByteString bs =
                (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
            java.lang.String s = bs.toStringUtf8();
            if (bs.isValidUtf8()) {
              clientMachine_ = s;
            }
            return s;
          } else {
            return (java.lang.String) ref;
          }
        }
        /**
         * <code>optional string clientMachine = 2;</code>
         * @return The bytes for clientMachine.
         */
        public org.apache.hadoop.thirdparty.protobuf.ByteString
            getClientMachineBytes() {
          java.lang.Object ref = clientMachine_;
          if (ref instanceof String) {
            org.apache.hadoop.thirdparty.protobuf.ByteString b = 
                org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                    (java.lang.String) ref);
            clientMachine_ = b;
            return b;
          } else {
            return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          }
        }
        /**
         * <code>optional string clientMachine = 2;</code>
         * @param value The clientMachine to set.
         * @return This builder for chaining.
         */
        public Builder setClientMachine(
            java.lang.String value) {
          if (value == null) { throw new NullPointerException(); }
          clientMachine_ = value;
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        /**
         * <code>optional string clientMachine = 2;</code>
         * @return This builder for chaining.
         */
        public Builder clearClientMachine() {
          clientMachine_ = getDefaultInstance().getClientMachine();
          bitField0_ = (bitField0_ & ~0x00000002);
          onChanged();
          return this;
        }
        /**
         * <code>optional string clientMachine = 2;</code>
         * @param value The bytes for clientMachine to set.
         * @return This builder for chaining.
         */
        public Builder setClientMachineBytes(
            org.apache.hadoop.thirdparty.protobuf.ByteString value) {
          if (value == null) { throw new NullPointerException(); }
          clientMachine_ = value;
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature)
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<FileUnderConstructionFeature>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<FileUnderConstructionFeature>() {
        @java.lang.Override
        public FileUnderConstructionFeature parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<FileUnderConstructionFeature> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<FileUnderConstructionFeature> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }
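
    // Illustrative sketch, not generated by protoc: building a
    // FileUnderConstructionFeature with the generated Builder above and parsing
    // it back through the generated parser. Both values are placeholders; both
    // fields are optional, so build() succeeds.
    private static FileUnderConstructionFeature roundTripUcFeatureExample()
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      FileUnderConstructionFeature uc = FileUnderConstructionFeature.newBuilder()
          .setClientName("DFSClient_example")   // optional string clientName = 1
          .setClientMachine("host.example.com") // optional string clientMachine = 2
          .build();
      return FileUnderConstructionFeature.parseFrom(uc.toByteArray());
    }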

    public interface AclFeatureProtoOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection.AclFeatureProto)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <pre>
       **
       * An ACL entry is represented by a 32-bit integer in Big Endian
       * format. The bits can be divided into five segments (see the decoding
       * sketch after this interface):
       * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
       *
       * [0:2) -- reserved for future use.
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- the scope of the entry (AclEntryScopeProto)
       * [27:29) -- the type of the entry (AclEntryTypeProto)
       * [29:32) -- the permission of the entry (FsActionProto)
       * </pre>
       *
       * <code>repeated fixed32 entries = 2 [packed = true];</code>
       * @return A list containing the entries.
       */
      java.util.List<java.lang.Integer> getEntriesList();
      /**
       * <pre>
       **
       * An ACL entry is represented by a 32-bit integer in Big Endian
       * format. The bits can be divided into five segments:
       * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
       *
       * [0:2) -- reserved for future use.
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- the scope of the entry (AclEntryScopeProto)
       * [27:29) -- the type of the entry (AclEntryTypeProto)
       * [29:32) -- the permission of the entry (FsActionProto)
       * </pre>
       *
       * <code>repeated fixed32 entries = 2 [packed = true];</code>
       * @return The count of entries.
       */
      int getEntriesCount();
      /**
       * <pre>
       **
       * An ACL entry is represented by a 32-bit integer in Big Endian
       * format. The bits can be divided into five segments:
       * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
       *
       * [0:2) -- reserved for future use.
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- the scope of the entry (AclEntryScopeProto)
       * [27:29) -- the type of the entry (AclEntryTypeProto)
       * [29:32) -- the permission of the entry (FsActionProto)
       * </pre>
       *
       * <code>repeated fixed32 entries = 2 [packed = true];</code>
       * @param index The index of the element to return.
       * @return The entries at the given index.
       */
      int getEntries(int index);
    }
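
    // Illustrative sketch, not generated by protoc: unpacking one 32-bit ACL
    // entry as laid out in the Javadoc above, with bit 0 as the most significant
    // bit. The shifts and masks are derived from that description; the names are
    // hypothetical and not part of the generated API.
    private static void decodeAclEntryExample(int entry) {
      int nameStringId = (entry >>> 6) & 0xFFFFFF; // bits [2:26): id into the StringTableSection
      int scope        = (entry >>> 5) & 0x1;      // bit  [26:27): AclEntryScopeProto value
      int type         = (entry >>> 3) & 0x3;      // bits [27:29): AclEntryTypeProto value
      int permission   = entry & 0x7;              // bits [29:32): FsActionProto value
      System.out.println("name=" + nameStringId + " scope=" + scope
          + " type=" + type + " perm=" + permission);
    }
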
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.AclFeatureProto}
     */
    public static final class AclFeatureProto extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection.AclFeatureProto)
        AclFeatureProtoOrBuilder {
    private static final long serialVersionUID = 0L;
      // Use AclFeatureProto.newBuilder() to construct.
      private AclFeatureProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private AclFeatureProto() {
        entries_ = emptyIntList();
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new AclFeatureProto();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder.class);
      }

      public static final int ENTRIES_FIELD_NUMBER = 2;
      @SuppressWarnings("serial")
      private org.apache.hadoop.thirdparty.protobuf.Internal.IntList entries_ =
          emptyIntList();
      /**
       * <pre>
       **
       * An ACL entry is represented by a 32-bit integer in Big Endian
       * format. The bits can be divided into five segments:
       * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
       *
       * [0:2) -- reserved for future use.
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- the scope of the entry (AclEntryScopeProto)
       * [27:29) -- the type of the entry (AclEntryTypeProto)
       * [29:32) -- the permission of the entry (FsActionProto)
       * </pre>
       *
       * <code>repeated fixed32 entries = 2 [packed = true];</code>
       * @return A list containing the entries.
       */
      @java.lang.Override
      public java.util.List<java.lang.Integer>
          getEntriesList() {
        return entries_;
      }
      /**
       * <pre>
       **
       * An ACL entry is represented by a 32-bit integer in Big Endian
       * format. The bits can be divided into five segments:
       * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
       *
       * [0:2) -- reserved for future use.
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- the scope of the entry (AclEntryScopeProto)
       * [27:29) -- the type of the entry (AclEntryTypeProto)
       * [29:32) -- the permission of the entry (FsActionProto)
       * </pre>
       *
       * <code>repeated fixed32 entries = 2 [packed = true];</code>
       * @return The count of entries.
       */
      public int getEntriesCount() {
        return entries_.size();
      }
      /**
       * <pre>
       **
       * An ACL entry is represented by a 32-bit integer in Big Endian
       * format. The bits can be divided into five segments:
       * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
       *
       * [0:2) -- reserved for future use.
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- the scope of the entry (AclEntryScopeProto)
       * [27:29) -- the type of the entry (AclEntryTypeProto)
       * [29:32) -- the permission of the entry (FsActionProto)
       * </pre>
       *
       * <code>repeated fixed32 entries = 2 [packed = true];</code>
       * @param index The index of the element to return.
       * @return The entries at the given index.
       */
      public int getEntries(int index) {
        return entries_.getInt(index);
      }
      private int entriesMemoizedSerializedSize = -1;

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        memoizedIsInitialized = 1;
        return true;
      }

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        getSerializedSize();
        if (getEntriesList().size() > 0) {
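          // Packed encoding: a single key, 18 = (field number 2 << 3) | wire type 2
          // (length-delimited), then the total byte length, then the raw fixed32 values.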
          output.writeUInt32NoTag(18);
          output.writeUInt32NoTag(entriesMemoizedSerializedSize);
        }
        for (int i = 0; i < entries_.size(); i++) {
          output.writeFixed32NoTag(entries_.getInt(i));
        }
        getUnknownFields().writeTo(output);
      }

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        {
          int dataSize = 0;
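          // Each packed fixed32 entry occupies exactly 4 bytes on the wire.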
          dataSize = 4 * getEntriesList().size();
          size += dataSize;
          if (!getEntriesList().isEmpty()) {
            size += 1;
            size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
                .computeInt32SizeNoTag(dataSize);
          }
          entriesMemoizedSerializedSize = dataSize;
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
          return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto) obj;

        if (!getEntriesList()
            .equals(other.getEntriesList())) return false;
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (getEntriesCount() > 0) {
          hash = (37 * hash) + ENTRIES_FIELD_NUMBER;
          hash = (53 * hash) + getEntriesList().hashCode();
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.AclFeatureProto}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection.AclFeatureProto)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.newBuilder()
        private Builder() {

        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);

        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          entries_ = emptyIntList();
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto(this);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto result) {
          int from_bitField0_ = bitField0_;
          if (((from_bitField0_ & 0x00000001) != 0)) {
            entries_.makeImmutable();
            result.entries_ = entries_;
          }
        }

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance()) return this;
          if (!other.entries_.isEmpty()) {
            if (entries_.isEmpty()) {
              entries_ = other.entries_;
              entries_.makeImmutable();
              bitField0_ |= 0x00000001;
            } else {
              ensureEntriesIsMutable();
              entries_.addAll(other.entries_);
            }
            onChanged();
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }

        @java.lang.Override
        public final boolean isInitialized() {
          return true;
        }

        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
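              // Editor's note: a protobuf tag is (fieldNumber << 3) | wireType. For the
              // repeated fixed32 "entries" field (number 2), tag 21 = (2 << 3) | 5 is a
              // single non-packed fixed32 element, while tag 18 = (2 << 3) | 2 is the
              // length-delimited packed run handled below.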
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 21: {
                  int v = input.readFixed32();
                  ensureEntriesIsMutable();
                  entries_.addInt(v);
                  break;
                } // case 21
                case 18: {
                  int length = input.readRawVarint32();
                  int limit = input.pushLimit(length);
                  int alloc = length > 4096 ? 4096 : length;
                  ensureEntriesIsMutable(alloc / 4);
                  while (input.getBytesUntilLimit() > 0) {
                    entries_.addInt(input.readFixed32());
                  }
                  input.popLimit(limit);
                  break;
                } // case 18
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
        private int bitField0_;

        private org.apache.hadoop.thirdparty.protobuf.Internal.IntList entries_ = emptyIntList();
        private void ensureEntriesIsMutable() {
          if (!entries_.isModifiable()) {
            entries_ = makeMutableCopy(entries_);
          }
          bitField0_ |= 0x00000001;
        }
        private void ensureEntriesIsMutable(int capacity) {
          if (!entries_.isModifiable()) {
            entries_ = makeMutableCopy(entries_, capacity);
          }
          bitField0_ |= 0x00000001;
        }
        /**
         * <pre>
         **
         * An ACL entry is represented by a 32-bit integer in Big Endian
         * format. The bits can be divided into five segments:
         * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
         *
         * [0:2) -- reserved for future uses.
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- the scope of the entry (AclEntryScopeProto)
         * [27:29) -- the type of the entry (AclEntryTypeProto)
         * [29:32) -- the permission of the entry (FsActionProto)
         * </pre>
         *
         * <code>repeated fixed32 entries = 2 [packed = true];</code>
         * @return A list containing the entries.
         */
        public java.util.List<java.lang.Integer>
            getEntriesList() {
          entries_.makeImmutable();
          return entries_;
        }
        /**
         * <pre>
         **
         * An ACL entry is represented by a 32-bit integer in Big Endian
         * format. The bits can be divided into five segments:
         * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
         *
         * [0:2) -- reserved for future uses.
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- the scope of the entry (AclEntryScopeProto)
         * [27:29) -- the type of the entry (AclEntryTypeProto)
         * [29:32) -- the permission of the entry (FsActionProto)
         * </pre>
         *
         * <code>repeated fixed32 entries = 2 [packed = true];</code>
         * @return The count of entries.
         */
        public int getEntriesCount() {
          return entries_.size();
        }
        /**
         * <pre>
         **
         * An ACL entry is represented by a 32-bit integer in Big Endian
         * format. The bits can be divided into five segments:
         * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
         *
         * [0:2) -- reserved for future uses.
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- the scope of the entry (AclEntryScopeProto)
         * [27:29) -- the type of the entry (AclEntryTypeProto)
         * [29:32) -- the permission of the entry (FsActionProto)
         * </pre>
         *
         * <code>repeated fixed32 entries = 2 [packed = true];</code>
         * @param index The index of the element to return.
         * @return The entries at the given index.
         */
        public int getEntries(int index) {
          return entries_.getInt(index);
        }
        /**
         * <pre>
         **
         * An ACL entry is represented by a 32-bit integer in Big Endian
         * format. The bits can be divided into five segments:
         * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
         *
         * [0:2) -- reserved for future uses.
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- the scope of the entry (AclEntryScopeProto)
         * [27:29) -- the type of the entry (AclEntryTypeProto)
         * [29:32) -- the permission of the entry (FsActionProto)
         * </pre>
         *
         * <code>repeated fixed32 entries = 2 [packed = true];</code>
         * @param index The index to set the value at.
         * @param value The entries to set.
         * @return This builder for chaining.
         */
        public Builder setEntries(
            int index, int value) {

          ensureEntriesIsMutable();
          entries_.setInt(index, value);
          bitField0_ |= 0x00000001;
          onChanged();
          return this;
        }
        /**
         * <pre>
         **
         * An ACL entry is represented by a 32-bit integer in Big Endian
         * format. The bits can be divided into five segments:
         * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
         *
         * [0:2) -- reserved for future uses.
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- the scope of the entry (AclEntryScopeProto)
         * [27:29) -- the type of the entry (AclEntryTypeProto)
         * [29:32) -- the permission of the entry (FsActionProto)
         * </pre>
         *
         * <code>repeated fixed32 entries = 2 [packed = true];</code>
         * @param value The entries to add.
         * @return This builder for chaining.
         */
        public Builder addEntries(int value) {

          ensureEntriesIsMutable();
          entries_.addInt(value);
          bitField0_ |= 0x00000001;
          onChanged();
          return this;
        }
        /**
         * <pre>
         **
         * An ACL entry is represented by a 32-bit integer in Big Endian
         * format. The bits can be divided into five segments:
         * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
         *
         * [0:2) -- reserved for future uses.
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- the scope of the entry (AclEntryScopeProto)
         * [27:29) -- the type of the entry (AclEntryTypeProto)
         * [29:32) -- the permission of the entry (FsActionProto)
         * </pre>
         *
         * <code>repeated fixed32 entries = 2 [packed = true];</code>
         * @param values The entries to add.
         * @return This builder for chaining.
         */
        public Builder addAllEntries(
            java.lang.Iterable<? extends java.lang.Integer> values) {
          ensureEntriesIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, entries_);
          bitField0_ |= 0x00000001;
          onChanged();
          return this;
        }
        /**
         * <pre>
         **
         * An ACL entry is represented by a 32-bit integer in Big Endian
         * format. The bits can be divided into five segments:
         * [0:2) || [2:26) || [26:27) || [27:29) || [29:32)
         *
         * [0:2) -- reserved for future uses.
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- the scope of the entry (AclEntryScopeProto)
         * [27:29) -- the type of the entry (AclEntryTypeProto)
         * [29:32) -- the permission of the entry (FsActionProto)
         * </pre>
         *
         * <code>repeated fixed32 entries = 2 [packed = true];</code>
         * @return This builder for chaining.
         */
        public Builder clearEntries() {
          entries_ = emptyIntList();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
          return this;
        }
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.AclFeatureProto)
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.AclFeatureProto)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<AclFeatureProto>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<AclFeatureProto>() {
        @java.lang.Override
        public AclFeatureProto parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<AclFeatureProto> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<AclFeatureProto> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }
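    // Editor's note: a hedged usage sketch, not part of the generated API. It packs and
    // unpacks one ACL entry following the bit layout documented on the "entries" field
    // above (bit 0 = most significant bit); the shift/mask constants are derived from
    // that comment, and nameId/scope/type/perm are hypothetical ints.
    //
    //   int entry = (nameId << 6) | (scope << 5) | (type << 3) | perm;
    //   AclFeatureProto acl = AclFeatureProto.newBuilder().addEntries(entry).build();
    //
    //   int v          = acl.getEntries(0);
    //   int permission = v & 0x7;               // [29:32) FsActionProto ordinal
    //   int entryType  = (v >>> 3) & 0x3;       // [27:29) AclEntryTypeProto ordinal
    //   int entryScope = (v >>> 5) & 0x1;       // [26:27) AclEntryScopeProto ordinal
    //   int stringId   = (v >>> 6) & 0xFFFFFF;  // [2:26) id in the StringTableSection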

    public interface XAttrCompactProtoOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <pre>
       **
       * 
       * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- namespace extension. Originally there were only 4 namespaces
       * so only 2 bits were needed. At that time, this bit was reserved. When a
       * 5th namespace was created (raw) this bit became used as a 3rd namespace
       * bit.
       * [27:32) -- reserved for future uses.
       * </pre>
       *
       * <code>required fixed32 name = 1;</code>
       * @return Whether the name field is set.
       */
      boolean hasName();
      /**
       * <pre>
       **
       * 
       * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- namespace extension. Originally there were only 4 namespaces
       * so only 2 bits were needed. At that time, this bit was reserved. When a
       * 5th namespace was created (raw) this bit became used as a 3rd namespace
       * bit.
       * [27:32) -- reserved for future uses.
       * </pre>
       *
       * <code>required fixed32 name = 1;</code>
       * @return The name.
       */
      int getName();

      /**
       * <code>optional bytes value = 2;</code>
       * @return Whether the value field is set.
       */
      boolean hasValue();
      /**
       * <code>optional bytes value = 2;</code>
       * @return The value.
       */
      org.apache.hadoop.thirdparty.protobuf.ByteString getValue();
    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto}
     */
    public static final class XAttrCompactProto extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto)
        XAttrCompactProtoOrBuilder {
    private static final long serialVersionUID = 0L;
      // Use XAttrCompactProto.newBuilder() to construct.
      private XAttrCompactProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private XAttrCompactProto() {
        value_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new XAttrCompactProto();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder.class);
      }

      private int bitField0_;
      public static final int NAME_FIELD_NUMBER = 1;
      private int name_ = 0;
      /**
       * <pre>
       **
       * 
       * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- namespace extension. Originally there were only 4 namespaces
       * so only 2 bits were needed. At that time, this bit was reserved. When a
       * 5th namespace was created (raw) this bit became used as a 3rd namespace
       * bit.
       * [27:32) -- reserved for future uses.
       * </pre>
       *
       * <code>required fixed32 name = 1;</code>
       * @return Whether the name field is set.
       */
      @java.lang.Override
      public boolean hasName() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <pre>
       **
       * 
       * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
       * [2:26) -- the name of the entry, which is an ID that points to a
       * string in the StringTableSection.
       * [26:27) -- namespace extension. Originally there were only 4 namespaces
       * so only 2 bits were needed. At that time, this bit was reserved. When a
       * 5th namespace was created (raw) this bit became used as a 3rd namespace
       * bit.
       * [27:32) -- reserved for future uses.
       * </pre>
       *
       * <code>required fixed32 name = 1;</code>
       * @return The name.
       */
      @java.lang.Override
      public int getName() {
        return name_;
      }

      public static final int VALUE_FIELD_NUMBER = 2;
      private org.apache.hadoop.thirdparty.protobuf.ByteString value_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>optional bytes value = 2;</code>
       * @return Whether the value field is set.
       */
      @java.lang.Override
      public boolean hasValue() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional bytes value = 2;</code>
       * @return The value.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getValue() {
        return value_;
      }

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        if (!hasName()) {
          memoizedIsInitialized = 0;
          return false;
        }
        memoizedIsInitialized = 1;
        return true;
      }

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        if (((bitField0_ & 0x00000001) != 0)) {
          output.writeFixed32(1, name_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          output.writeBytes(2, value_);
        }
        getUnknownFields().writeTo(output);
      }

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeFixed32Size(1, name_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeBytesSize(2, value_);
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
          return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto) obj;

        if (hasName() != other.hasName()) return false;
        if (hasName()) {
          if (getName()
              != other.getName()) return false;
        }
        if (hasValue() != other.hasValue()) return false;
        if (hasValue()) {
          if (!getValue()
              .equals(other.getValue())) return false;
        }
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (hasName()) {
          hash = (37 * hash) + NAME_FIELD_NUMBER;
          hash = (53 * hash) + getName();
        }
        if (hasValue()) {
          hash = (37 * hash) + VALUE_FIELD_NUMBER;
          hash = (53 * hash) + getValue().hashCode();
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.newBuilder()
        private Builder() {

        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);

        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          name_ = 0;
          value_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto(this);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto result) {
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) != 0)) {
            result.name_ = name_;
            to_bitField0_ |= 0x00000001;
          }
          if (((from_bitField0_ & 0x00000002) != 0)) {
            result.value_ = value_;
            to_bitField0_ |= 0x00000002;
          }
          result.bitField0_ |= to_bitField0_;
        }

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.getDefaultInstance()) return this;
          if (other.hasName()) {
            setName(other.getName());
          }
          if (other.hasValue()) {
            setValue(other.getValue());
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }

        @java.lang.Override
        public final boolean isInitialized() {
          if (!hasName()) {
            return false;
          }
          return true;
        }

        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
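              // Editor's note: tag = (fieldNumber << 3) | wireType, so 13 = (1 << 3) | 5
              // is the required fixed32 "name" word and 18 = (2 << 3) | 2 is the
              // length-delimited optional "value" bytes.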
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 13: {
                  name_ = input.readFixed32();
                  bitField0_ |= 0x00000001;
                  break;
                } // case 13
                case 18: {
                  value_ = input.readBytes();
                  bitField0_ |= 0x00000002;
                  break;
                } // case 18
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
        private int bitField0_;

        private int name_;
        /**
         * <pre>
         **
         * 
         * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- namespace extension. Originally there were only 4 namespaces
         * so only 2 bits were needed. At that time, this bit was reserved. When a
         * 5th namespace was created (raw) this bit became used as a 3rd namespace
         * bit.
         * [27:32) -- reserved for future uses.
         * </pre>
         *
         * <code>required fixed32 name = 1;</code>
         * @return Whether the name field is set.
         */
        @java.lang.Override
        public boolean hasName() {
          return ((bitField0_ & 0x00000001) != 0);
        }
        /**
         * <pre>
         **
         * 
         * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- namespace extension. Originally there were only 4 namespaces
         * so only 2 bits were needed. At that time, this bit was reserved. When a
         * 5th namespace was created (raw) this bit became used as a 3rd namespace
         * bit.
         * [27:32) -- reserved for future uses.
         * </pre>
         *
         * <code>required fixed32 name = 1;</code>
         * @return The name.
         */
        @java.lang.Override
        public int getName() {
          return name_;
        }
        /**
         * <pre>
         **
         * 
         * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- namespace extension. Originally there were only 4 namespaces
         * so only 2 bits were needed. At that time, this bit was reserved. When a
         * 5th namespace was created (raw) this bit became used as a 3rd namespace
         * bit.
         * [27:32) -- reserved for future uses.
         * </pre>
         *
         * <code>required fixed32 name = 1;</code>
         * @param value The name to set.
         * @return This builder for chaining.
         */
        public Builder setName(int value) {

          name_ = value;
          bitField0_ |= 0x00000001;
          onChanged();
          return this;
        }
        /**
         * <pre>
         **
         * 
         * [0:2) -- the namespace of XAttr (XAttrNamespaceProto)
         * [2:26) -- the name of the entry, which is an ID that points to a
         * string in the StringTableSection.
         * [26:27) -- namespace extension. Originally there were only 4 namespaces
         * so only 2 bits were needed. At that time, this bit was reserved. When a
         * 5th namespace was created (raw) this bit became used as a 3rd namespace
         * bit.
         * [27:32) -- reserved for future uses.
         * </pre>
         *
         * <code>required fixed32 name = 1;</code>
         * @return This builder for chaining.
         */
        public Builder clearName() {
          bitField0_ = (bitField0_ & ~0x00000001);
          name_ = 0;
          onChanged();
          return this;
        }

        private org.apache.hadoop.thirdparty.protobuf.ByteString value_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        /**
         * <code>optional bytes value = 2;</code>
         * @return Whether the value field is set.
         */
        @java.lang.Override
        public boolean hasValue() {
          return ((bitField0_ & 0x00000002) != 0);
        }
        /**
         * <code>optional bytes value = 2;</code>
         * @return The value.
         */
        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.ByteString getValue() {
          return value_;
        }
        /**
         * <code>optional bytes value = 2;</code>
         * @param value The value to set.
         * @return This builder for chaining.
         */
        public Builder setValue(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
          if (value == null) { throw new NullPointerException(); }
          value_ = value;
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        /**
         * <code>optional bytes value = 2;</code>
         * @return This builder for chaining.
         */
        public Builder clearValue() {
          bitField0_ = (bitField0_ & ~0x00000002);
          value_ = getDefaultInstance().getValue();
          onChanged();
          return this;
        }
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto)
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<XAttrCompactProto>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<XAttrCompactProto>() {
        @java.lang.Override
        public XAttrCompactProto parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<XAttrCompactProto> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<XAttrCompactProto> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }
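    // Editor's note: a hedged sketch (not generated code) that unpacks the fixed32
    // "name" word per the bit layout documented above: the two high bits at [0:2) hold
    // the original namespace, the bit at [26:27) is the third (most significant)
    // namespace bit, and [2:26) is a StringTableSection id. "xattr" is a hypothetical
    // XAttrCompactProto instance.
    //
    //   int w           = xattr.getName();
    //   int nsLow       = (w >>> 30) & 0x3;     // [0:2) original 2-bit namespace
    //   int nsExt       = (w >>> 5) & 0x1;      // [26:27) namespace extension bit
    //   int namespaceId = (nsExt << 2) | nsLow; // XAttrNamespaceProto ordinal
    //   int stringId    = (w >>> 6) & 0xFFFFFF; // [2:26) id in the StringTableSection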

    public interface XAttrFeatureProtoOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
       */
      java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto> 
          getXAttrsList();
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto getXAttrs(int index);
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
       */
      int getXAttrsCount();
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
       */
      java.util.List<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder> 
          getXAttrsOrBuilderList();
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder getXAttrsOrBuilder(
          int index);
    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto}
     */
    public static final class XAttrFeatureProto extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto)
        XAttrFeatureProtoOrBuilder {
    private static final long serialVersionUID = 0L;
      // Use XAttrFeatureProto.newBuilder() to construct.
      private XAttrFeatureProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private XAttrFeatureProto() {
        xAttrs_ = java.util.Collections.emptyList();
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new XAttrFeatureProto();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder.class);
      }

      public static final int XATTRS_FIELD_NUMBER = 1;
      @SuppressWarnings("serial")
      private java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto> xAttrs_;
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
       */
      @java.lang.Override
      public java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto> getXAttrsList() {
        return xAttrs_;
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
       */
      @java.lang.Override
      public java.util.List<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder> 
          getXAttrsOrBuilderList() {
        return xAttrs_;
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
       */
      @java.lang.Override
      public int getXAttrsCount() {
        return xAttrs_.size();
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto getXAttrs(int index) {
        return xAttrs_.get(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder getXAttrsOrBuilder(
          int index) {
        return xAttrs_.get(index);
      }
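      // Editor's note: a small usage sketch (assumption, not generated code) for walking
      // the repeated xAttrs field; "feature" and "process" are hypothetical, and each
      // element's packed "name" word can be decoded as sketched after XAttrCompactProto.
      //
      //   for (XAttrCompactProto xattr : feature.getXAttrsList()) {
      //     process(xattr.getName(), xattr.getValue());
      //   }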

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        for (int i = 0; i < getXAttrsCount(); i++) {
          if (!getXAttrs(i).isInitialized()) {
            memoizedIsInitialized = 0;
            return false;
          }
        }
        memoizedIsInitialized = 1;
        return true;
      }

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        for (int i = 0; i < xAttrs_.size(); i++) {
          output.writeMessage(1, xAttrs_.get(i));
        }
        getUnknownFields().writeTo(output);
      }

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        for (int i = 0; i < xAttrs_.size(); i++) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeMessageSize(1, xAttrs_.get(i));
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
          return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto) obj;

        if (!getXAttrsList()
            .equals(other.getXAttrsList())) return false;
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (getXAttrsCount() > 0) {
          hash = (37 * hash) + XATTRS_FIELD_NUMBER;
          hash = (53 * hash) + getXAttrsList().hashCode();
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.newBuilder()
        private Builder() {

        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);

        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          if (xAttrsBuilder_ == null) {
            xAttrs_ = java.util.Collections.emptyList();
          } else {
            xAttrs_ = null;
            xAttrsBuilder_.clear();
          }
          bitField0_ = (bitField0_ & ~0x00000001);
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto(this);
          buildPartialRepeatedFields(result);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto result) {
          if (xAttrsBuilder_ == null) {
            if (((bitField0_ & 0x00000001) != 0)) {
              xAttrs_ = java.util.Collections.unmodifiableList(xAttrs_);
              bitField0_ = (bitField0_ & ~0x00000001);
            }
            result.xAttrs_ = xAttrs_;
          } else {
            result.xAttrs_ = xAttrsBuilder_.build();
          }
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto result) {
          int from_bitField0_ = bitField0_;
        }

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance()) return this;
          if (xAttrsBuilder_ == null) {
            if (!other.xAttrs_.isEmpty()) {
              if (xAttrs_.isEmpty()) {
                xAttrs_ = other.xAttrs_;
                bitField0_ = (bitField0_ & ~0x00000001);
              } else {
                ensureXAttrsIsMutable();
                xAttrs_.addAll(other.xAttrs_);
              }
              onChanged();
            }
          } else {
            if (!other.xAttrs_.isEmpty()) {
              if (xAttrsBuilder_.isEmpty()) {
                xAttrsBuilder_.dispose();
                xAttrsBuilder_ = null;
                xAttrs_ = other.xAttrs_;
                bitField0_ = (bitField0_ & ~0x00000001);
                xAttrsBuilder_ = 
                  org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                     getXAttrsFieldBuilder() : null;
              } else {
                xAttrsBuilder_.addAllMessages(other.xAttrs_);
              }
            }
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }

        @java.lang.Override
        public final boolean isInitialized() {
          for (int i = 0; i < getXAttrsCount(); i++) {
            if (!getXAttrs(i).isInitialized()) {
              return false;
            }
          }
          return true;
        }

        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 10: {
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto m =
                      input.readMessage(
                          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.PARSER,
                          extensionRegistry);
                  if (xAttrsBuilder_ == null) {
                    ensureXAttrsIsMutable();
                    xAttrs_.add(m);
                  } else {
                    xAttrsBuilder_.addMessage(m);
                  }
                  break;
                } // case 10
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
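        // In the parse loop above, tag value 10 corresponds to field number 1 with
        // wire type 2 (length-delimited), i.e. one serialized XAttrCompactProto entry;
        // a tag of 0 marks end of input, and unrecognized tags are retained via the
        // unknown-field machinery rather than being dropped.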
        private int bitField0_;

        private java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto> xAttrs_ =
          java.util.Collections.emptyList();
        private void ensureXAttrsIsMutable() {
          if (!((bitField0_ & 0x00000001) != 0)) {
            xAttrs_ = new java.util.ArrayList<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto>(xAttrs_);
            bitField0_ |= 0x00000001;
          }
        }

        private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder> xAttrsBuilder_;

        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
         */
        public java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto> getXAttrsList() {
          if (xAttrsBuilder_ == null) {
            return java.util.Collections.unmodifiableList(xAttrs_);
          } else {
            return xAttrsBuilder_.getMessageList();
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
         */
        public int getXAttrsCount() {
          if (xAttrsBuilder_ == null) {
            return xAttrs_.size();
          } else {
            return xAttrsBuilder_.getCount();
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto getXAttrs(int index) {
          if (xAttrsBuilder_ == null) {
            return xAttrs_.get(index);
          } else {
            return xAttrsBuilder_.getMessage(index);
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
         */
        public Builder setXAttrs(
            int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto value) {
          if (xAttrsBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            ensureXAttrsIsMutable();
            xAttrs_.set(index, value);
            onChanged();
          } else {
            xAttrsBuilder_.setMessage(index, value);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
         */
        public Builder setXAttrs(
            int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder builderForValue) {
          if (xAttrsBuilder_ == null) {
            ensureXAttrsIsMutable();
            xAttrs_.set(index, builderForValue.build());
            onChanged();
          } else {
            xAttrsBuilder_.setMessage(index, builderForValue.build());
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
         */
        public Builder addXAttrs(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto value) {
          if (xAttrsBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            ensureXAttrsIsMutable();
            xAttrs_.add(value);
            onChanged();
          } else {
            xAttrsBuilder_.addMessage(value);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
         */
        public Builder addXAttrs(
            int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto value) {
          if (xAttrsBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            ensureXAttrsIsMutable();
            xAttrs_.add(index, value);
            onChanged();
          } else {
            xAttrsBuilder_.addMessage(index, value);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
         */
        public Builder addXAttrs(
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder builderForValue) {
          if (xAttrsBuilder_ == null) {
            ensureXAttrsIsMutable();
            xAttrs_.add(builderForValue.build());
            onChanged();
          } else {
            xAttrsBuilder_.addMessage(builderForValue.build());
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
         */
        public Builder addXAttrs(
            int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder builderForValue) {
          if (xAttrsBuilder_ == null) {
            ensureXAttrsIsMutable();
            xAttrs_.add(index, builderForValue.build());
            onChanged();
          } else {
            xAttrsBuilder_.addMessage(index, builderForValue.build());
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
         */
        public Builder addAllXAttrs(
            java.lang.Iterable<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto> values) {
          if (xAttrsBuilder_ == null) {
            ensureXAttrsIsMutable();
            org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
                values, xAttrs_);
            onChanged();
          } else {
            xAttrsBuilder_.addAllMessages(values);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
         */
        public Builder clearXAttrs() {
          if (xAttrsBuilder_ == null) {
            xAttrs_ = java.util.Collections.emptyList();
            bitField0_ = (bitField0_ & ~0x00000001);
            onChanged();
          } else {
            xAttrsBuilder_.clear();
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
         */
        public Builder removeXAttrs(int index) {
          if (xAttrsBuilder_ == null) {
            ensureXAttrsIsMutable();
            xAttrs_.remove(index);
            onChanged();
          } else {
            xAttrsBuilder_.remove(index);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder getXAttrsBuilder(
            int index) {
          return getXAttrsFieldBuilder().getBuilder(index);
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder getXAttrsOrBuilder(
            int index) {
          if (xAttrsBuilder_ == null) {
            return xAttrs_.get(index);
          } else {
            return xAttrsBuilder_.getMessageOrBuilder(index);
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
         */
        public java.util.List<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder> 
             getXAttrsOrBuilderList() {
          if (xAttrsBuilder_ != null) {
            return xAttrsBuilder_.getMessageOrBuilderList();
          } else {
            return java.util.Collections.unmodifiableList(xAttrs_);
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder addXAttrsBuilder() {
          return getXAttrsFieldBuilder().addBuilder(
              org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.getDefaultInstance());
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder addXAttrsBuilder(
            int index) {
          return getXAttrsFieldBuilder().addBuilder(
              index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.getDefaultInstance());
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.XAttrCompactProto xAttrs = 1;</code>
         */
        public java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder> 
             getXAttrsBuilderList() {
          return getXAttrsFieldBuilder().getBuilderList();
        }
        private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder> 
            getXAttrsFieldBuilder() {
          if (xAttrsBuilder_ == null) {
            xAttrsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProtoOrBuilder>(
                    xAttrs_,
                    ((bitField0_ & 0x00000001) != 0),
                    getParentForChildren(),
                    isClean());
            xAttrs_ = null;
          }
          return xAttrsBuilder_;
        }
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto)
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<XAttrFeatureProto>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<XAttrFeatureProto>() {
        @java.lang.Override
        public XAttrFeatureProto parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<XAttrFeatureProto> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<XAttrFeatureProto> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }
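    /*
     * Illustrative sketch (not part of the generated API surface): XAttrFeatureProto
     * wraps a repeated list of XAttrCompactProto entries, and a caller would typically
     * round-trip it through the builder and parser shown above. The variable names
     * below are hypothetical.
     *
     *   XAttrFeatureProto feature = XAttrFeatureProto.newBuilder()
     *       .addXAttrs(someCompactXAttr)   // an XAttrCompactProto built elsewhere
     *       .build();
     *   byte[] bytes = feature.toByteArray();
     *   XAttrFeatureProto parsed = XAttrFeatureProto.parseFrom(bytes);
     *   int count = parsed.getXAttrsCount();
     */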

    public interface INodeFileOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection.INodeFile)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <code>optional uint32 replication = 1;</code>
       * @return Whether the replication field is set.
       */
      boolean hasReplication();
      /**
       * <code>optional uint32 replication = 1;</code>
       * @return The replication.
       */
      int getReplication();

      /**
       * <code>optional uint64 modificationTime = 2;</code>
       * @return Whether the modificationTime field is set.
       */
      boolean hasModificationTime();
      /**
       * <code>optional uint64 modificationTime = 2;</code>
       * @return The modificationTime.
       */
      long getModificationTime();

      /**
       * <code>optional uint64 accessTime = 3;</code>
       * @return Whether the accessTime field is set.
       */
      boolean hasAccessTime();
      /**
       * <code>optional uint64 accessTime = 3;</code>
       * @return The accessTime.
       */
      long getAccessTime();

      /**
       * <code>optional uint64 preferredBlockSize = 4;</code>
       * @return Whether the preferredBlockSize field is set.
       */
      boolean hasPreferredBlockSize();
      /**
       * <code>optional uint64 preferredBlockSize = 4;</code>
       * @return The preferredBlockSize.
       */
      long getPreferredBlockSize();

      /**
       * <code>optional fixed64 permission = 5;</code>
       * @return Whether the permission field is set.
       */
      boolean hasPermission();
      /**
       * <code>optional fixed64 permission = 5;</code>
       * @return The permission.
       */
      long getPermission();

      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
       */
      java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> 
          getBlocksList();
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
       */
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index);
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
       */
      int getBlocksCount();
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
       */
      java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
          getBlocksOrBuilderList();
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
       */
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
          int index);

      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
       * @return Whether the fileUC field is set.
       */
      boolean hasFileUC();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
       * @return The fileUC.
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature getFileUC();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeatureOrBuilder getFileUCOrBuilder();

      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
       * @return Whether the acl field is set.
       */
      boolean hasAcl();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
       * @return The acl.
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getAcl();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder getAclOrBuilder();

      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
       * @return Whether the xAttrs field is set.
       */
      boolean hasXAttrs();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
       * @return The xAttrs.
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getXAttrs();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder getXAttrsOrBuilder();

      /**
       * <code>optional uint32 storagePolicyID = 10;</code>
       * @return Whether the storagePolicyID field is set.
       */
      boolean hasStoragePolicyID();
      /**
       * <code>optional uint32 storagePolicyID = 10;</code>
       * @return The storagePolicyID.
       */
      int getStoragePolicyID();

      /**
       * <code>optional .hadoop.hdfs.BlockTypeProto blockType = 11;</code>
       * @return Whether the blockType field is set.
       */
      boolean hasBlockType();
      /**
       * <code>optional .hadoop.hdfs.BlockTypeProto blockType = 11;</code>
       * @return The blockType.
       */
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto getBlockType();

      /**
       * <code>optional uint32 erasureCodingPolicyID = 12;</code>
       * @return Whether the erasureCodingPolicyID field is set.
       */
      boolean hasErasureCodingPolicyID();
      /**
       * <code>optional uint32 erasureCodingPolicyID = 12;</code>
       * @return The erasureCodingPolicyID.
       */
      int getErasureCodingPolicyID();
    }
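    /*
     * Reader-side sketch (hypothetical, not generated code): every INodeFile field is
     * optional in fsimage.proto, so the generated accessors return the proto default
     * (0, 0L, or a default instance) when a field is absent. A consumer would normally
     * guard each read with the corresponding has* method, for example:
     *
     *   INodeFile file = INodeFile.parseFrom(bytes);   // 'bytes' is assumed input
     *   if (file.hasReplication()) {
     *     int replication = file.getReplication();
     *   }
     *   long mtime = file.hasModificationTime() ? file.getModificationTime() : 0L;
     */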
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INodeFile}
     */
    public static final class INodeFile extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection.INodeFile)
        INodeFileOrBuilder {
    private static final long serialVersionUID = 0L;
      // Use INodeFile.newBuilder() to construct.
      private INodeFile(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private INodeFile() {
        blocks_ = java.util.Collections.emptyList();
        blockType_ = 0;
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new INodeFile();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder.class);
      }

      private int bitField0_;
      public static final int REPLICATION_FIELD_NUMBER = 1;
      private int replication_ = 0;
      /**
       * <code>optional uint32 replication = 1;</code>
       * @return Whether the replication field is set.
       */
      @java.lang.Override
      public boolean hasReplication() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional uint32 replication = 1;</code>
       * @return The replication.
       */
      @java.lang.Override
      public int getReplication() {
        return replication_;
      }

      public static final int MODIFICATIONTIME_FIELD_NUMBER = 2;
      private long modificationTime_ = 0L;
      /**
       * <code>optional uint64 modificationTime = 2;</code>
       * @return Whether the modificationTime field is set.
       */
      @java.lang.Override
      public boolean hasModificationTime() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional uint64 modificationTime = 2;</code>
       * @return The modificationTime.
       */
      @java.lang.Override
      public long getModificationTime() {
        return modificationTime_;
      }

      public static final int ACCESSTIME_FIELD_NUMBER = 3;
      private long accessTime_ = 0L;
      /**
       * <code>optional uint64 accessTime = 3;</code>
       * @return Whether the accessTime field is set.
       */
      @java.lang.Override
      public boolean hasAccessTime() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional uint64 accessTime = 3;</code>
       * @return The accessTime.
       */
      @java.lang.Override
      public long getAccessTime() {
        return accessTime_;
      }

      public static final int PREFERREDBLOCKSIZE_FIELD_NUMBER = 4;
      private long preferredBlockSize_ = 0L;
      /**
       * <code>optional uint64 preferredBlockSize = 4;</code>
       * @return Whether the preferredBlockSize field is set.
       */
      @java.lang.Override
      public boolean hasPreferredBlockSize() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional uint64 preferredBlockSize = 4;</code>
       * @return The preferredBlockSize.
       */
      @java.lang.Override
      public long getPreferredBlockSize() {
        return preferredBlockSize_;
      }

      public static final int PERMISSION_FIELD_NUMBER = 5;
      private long permission_ = 0L;
      /**
       * <code>optional fixed64 permission = 5;</code>
       * @return Whether the permission field is set.
       */
      @java.lang.Override
      public boolean hasPermission() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional fixed64 permission = 5;</code>
       * @return The permission.
       */
      @java.lang.Override
      public long getPermission() {
        return permission_;
      }

      public static final int BLOCKS_FIELD_NUMBER = 6;
      @SuppressWarnings("serial")
      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> blocks_;
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
       */
      @java.lang.Override
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> getBlocksList() {
        return blocks_;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
       */
      @java.lang.Override
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
          getBlocksOrBuilderList() {
        return blocks_;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
       */
      @java.lang.Override
      public int getBlocksCount() {
        return blocks_.size();
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) {
        return blocks_.get(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
          int index) {
        return blocks_.get(index);
      }

      public static final int FILEUC_FIELD_NUMBER = 7;
      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature fileUC_;
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
       * @return Whether the fileUC field is set.
       */
      @java.lang.Override
      public boolean hasFileUC() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
       * @return The fileUC.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature getFileUC() {
        return fileUC_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.getDefaultInstance() : fileUC_;
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeatureOrBuilder getFileUCOrBuilder() {
        return fileUC_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.getDefaultInstance() : fileUC_;
      }

      public static final int ACL_FIELD_NUMBER = 8;
      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto acl_;
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
       * @return Whether the acl field is set.
       */
      @java.lang.Override
      public boolean hasAcl() {
        return ((bitField0_ & 0x00000040) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
       * @return The acl.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getAcl() {
        return acl_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance() : acl_;
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder getAclOrBuilder() {
        return acl_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance() : acl_;
      }

      public static final int XATTRS_FIELD_NUMBER = 9;
      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto xAttrs_;
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
       * @return Whether the xAttrs field is set.
       */
      @java.lang.Override
      public boolean hasXAttrs() {
        return ((bitField0_ & 0x00000080) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
       * @return The xAttrs.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getXAttrs() {
        return xAttrs_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance() : xAttrs_;
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder getXAttrsOrBuilder() {
        return xAttrs_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance() : xAttrs_;
      }

      public static final int STORAGEPOLICYID_FIELD_NUMBER = 10;
      private int storagePolicyID_ = 0;
      /**
       * <code>optional uint32 storagePolicyID = 10;</code>
       * @return Whether the storagePolicyID field is set.
       */
      @java.lang.Override
      public boolean hasStoragePolicyID() {
        return ((bitField0_ & 0x00000100) != 0);
      }
      /**
       * <code>optional uint32 storagePolicyID = 10;</code>
       * @return The storagePolicyID.
       */
      @java.lang.Override
      public int getStoragePolicyID() {
        return storagePolicyID_;
      }

      public static final int BLOCKTYPE_FIELD_NUMBER = 11;
      private int blockType_ = 0;
      /**
       * <code>optional .hadoop.hdfs.BlockTypeProto blockType = 11;</code>
       * @return Whether the blockType field is set.
       */
      @java.lang.Override public boolean hasBlockType() {
        return ((bitField0_ & 0x00000200) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.BlockTypeProto blockType = 11;</code>
       * @return The blockType.
       */
      @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto getBlockType() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto.forNumber(blockType_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto.CONTIGUOUS : result;
      }
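      // For the enum-typed blockType field, the accessor above maps the raw stored
      // value through BlockTypeProto.forNumber() and falls back to CONTIGUOUS when the
      // wire value is unrecognized, so callers never observe a null enum.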

      public static final int ERASURECODINGPOLICYID_FIELD_NUMBER = 12;
      private int erasureCodingPolicyID_ = 0;
      /**
       * <code>optional uint32 erasureCodingPolicyID = 12;</code>
       * @return Whether the erasureCodingPolicyID field is set.
       */
      @java.lang.Override
      public boolean hasErasureCodingPolicyID() {
        return ((bitField0_ & 0x00000400) != 0);
      }
      /**
       * <code>optional uint32 erasureCodingPolicyID = 12;</code>
       * @return The erasureCodingPolicyID.
       */
      @java.lang.Override
      public int getErasureCodingPolicyID() {
        return erasureCodingPolicyID_;
      }

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        for (int i = 0; i < getBlocksCount(); i++) {
          if (!getBlocks(i).isInitialized()) {
            memoizedIsInitialized = 0;
            return false;
          }
        }
        if (hasXAttrs()) {
          if (!getXAttrs().isInitialized()) {
            memoizedIsInitialized = 0;
            return false;
          }
        }
        memoizedIsInitialized = 1;
        return true;
      }

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        if (((bitField0_ & 0x00000001) != 0)) {
          output.writeUInt32(1, replication_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          output.writeUInt64(2, modificationTime_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          output.writeUInt64(3, accessTime_);
        }
        if (((bitField0_ & 0x00000008) != 0)) {
          output.writeUInt64(4, preferredBlockSize_);
        }
        if (((bitField0_ & 0x00000010) != 0)) {
          output.writeFixed64(5, permission_);
        }
        for (int i = 0; i < blocks_.size(); i++) {
          output.writeMessage(6, blocks_.get(i));
        }
        if (((bitField0_ & 0x00000020) != 0)) {
          output.writeMessage(7, getFileUC());
        }
        if (((bitField0_ & 0x00000040) != 0)) {
          output.writeMessage(8, getAcl());
        }
        if (((bitField0_ & 0x00000080) != 0)) {
          output.writeMessage(9, getXAttrs());
        }
        if (((bitField0_ & 0x00000100) != 0)) {
          output.writeUInt32(10, storagePolicyID_);
        }
        if (((bitField0_ & 0x00000200) != 0)) {
          output.writeEnum(11, blockType_);
        }
        if (((bitField0_ & 0x00000400) != 0)) {
          output.writeUInt32(12, erasureCodingPolicyID_);
        }
        getUnknownFields().writeTo(output);
      }

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt32Size(1, replication_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64Size(2, modificationTime_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64Size(3, accessTime_);
        }
        if (((bitField0_ & 0x00000008) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64Size(4, preferredBlockSize_);
        }
        if (((bitField0_ & 0x00000010) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeFixed64Size(5, permission_);
        }
        for (int i = 0; i < blocks_.size(); i++) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeMessageSize(6, blocks_.get(i));
        }
        if (((bitField0_ & 0x00000020) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeMessageSize(7, getFileUC());
        }
        if (((bitField0_ & 0x00000040) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeMessageSize(8, getAcl());
        }
        if (((bitField0_ & 0x00000080) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeMessageSize(9, getXAttrs());
        }
        if (((bitField0_ & 0x00000100) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt32Size(10, storagePolicyID_);
        }
        if (((bitField0_ & 0x00000200) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeEnumSize(11, blockType_);
        }
        if (((bitField0_ & 0x00000400) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt32Size(12, erasureCodingPolicyID_);
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
          return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile) obj;

        if (hasReplication() != other.hasReplication()) return false;
        if (hasReplication()) {
          if (getReplication()
              != other.getReplication()) return false;
        }
        if (hasModificationTime() != other.hasModificationTime()) return false;
        if (hasModificationTime()) {
          if (getModificationTime()
              != other.getModificationTime()) return false;
        }
        if (hasAccessTime() != other.hasAccessTime()) return false;
        if (hasAccessTime()) {
          if (getAccessTime()
              != other.getAccessTime()) return false;
        }
        if (hasPreferredBlockSize() != other.hasPreferredBlockSize()) return false;
        if (hasPreferredBlockSize()) {
          if (getPreferredBlockSize()
              != other.getPreferredBlockSize()) return false;
        }
        if (hasPermission() != other.hasPermission()) return false;
        if (hasPermission()) {
          if (getPermission()
              != other.getPermission()) return false;
        }
        if (!getBlocksList()
            .equals(other.getBlocksList())) return false;
        if (hasFileUC() != other.hasFileUC()) return false;
        if (hasFileUC()) {
          if (!getFileUC()
              .equals(other.getFileUC())) return false;
        }
        if (hasAcl() != other.hasAcl()) return false;
        if (hasAcl()) {
          if (!getAcl()
              .equals(other.getAcl())) return false;
        }
        if (hasXAttrs() != other.hasXAttrs()) return false;
        if (hasXAttrs()) {
          if (!getXAttrs()
              .equals(other.getXAttrs())) return false;
        }
        if (hasStoragePolicyID() != other.hasStoragePolicyID()) return false;
        if (hasStoragePolicyID()) {
          if (getStoragePolicyID()
              != other.getStoragePolicyID()) return false;
        }
        if (hasBlockType() != other.hasBlockType()) return false;
        if (hasBlockType()) {
          if (blockType_ != other.blockType_) return false;
        }
        if (hasErasureCodingPolicyID() != other.hasErasureCodingPolicyID()) return false;
        if (hasErasureCodingPolicyID()) {
          if (getErasureCodingPolicyID()
              != other.getErasureCodingPolicyID()) return false;
        }
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (hasReplication()) {
          hash = (37 * hash) + REPLICATION_FIELD_NUMBER;
          hash = (53 * hash) + getReplication();
        }
        if (hasModificationTime()) {
          hash = (37 * hash) + MODIFICATIONTIME_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getModificationTime());
        }
        if (hasAccessTime()) {
          hash = (37 * hash) + ACCESSTIME_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getAccessTime());
        }
        if (hasPreferredBlockSize()) {
          hash = (37 * hash) + PREFERREDBLOCKSIZE_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getPreferredBlockSize());
        }
        if (hasPermission()) {
          hash = (37 * hash) + PERMISSION_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getPermission());
        }
        if (getBlocksCount() > 0) {
          hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
          hash = (53 * hash) + getBlocksList().hashCode();
        }
        if (hasFileUC()) {
          hash = (37 * hash) + FILEUC_FIELD_NUMBER;
          hash = (53 * hash) + getFileUC().hashCode();
        }
        if (hasAcl()) {
          hash = (37 * hash) + ACL_FIELD_NUMBER;
          hash = (53 * hash) + getAcl().hashCode();
        }
        if (hasXAttrs()) {
          hash = (37 * hash) + XATTRS_FIELD_NUMBER;
          hash = (53 * hash) + getXAttrs().hashCode();
        }
        if (hasStoragePolicyID()) {
          hash = (37 * hash) + STORAGEPOLICYID_FIELD_NUMBER;
          hash = (53 * hash) + getStoragePolicyID();
        }
        if (hasBlockType()) {
          hash = (37 * hash) + BLOCKTYPE_FIELD_NUMBER;
          hash = (53 * hash) + blockType_;
        }
        if (hasErasureCodingPolicyID()) {
          hash = (37 * hash) + ERASURECODINGPOLICYID_FIELD_NUMBER;
          hash = (53 * hash) + getErasureCodingPolicyID();
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }
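      // A minimal usage sketch for the parse entry points above (`data` and `in` are
      // placeholder names for a serialized byte[] and an InputStream, not part of this API):
      //
      //   INodeSection.INodeFile file = INodeSection.INodeFile.parseFrom(data);
      //   INodeSection.INodeFile next = INodeSection.INodeFile.parseDelimitedFrom(in);
      //
      // parseFrom expects exactly one serialized message, while parseDelimitedFrom first
      // reads a varint length prefix and can therefore be called repeatedly on one stream.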

      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
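      // A minimal builder sketch (the field values are illustrative placeholders only):
      //
      //   INodeSection.INodeFile file = INodeSection.INodeFile.newBuilder()
      //       .setReplication(3)
      //       .setPreferredBlockSize(128L * 1024 * 1024)
      //       .setModificationTime(System.currentTimeMillis())
      //       .build();
      //
      // toBuilder() instead returns a builder pre-populated from an existing message,
      // which is the usual way to derive a modified copy without mutating the original.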
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INodeFile}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection.INodeFile)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.newBuilder()
        private Builder() {
          maybeForceBuilderInitialization();
        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);
          maybeForceBuilderInitialization();
        }
        private void maybeForceBuilderInitialization() {
          if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                  .alwaysUseFieldBuilders) {
            getBlocksFieldBuilder();
            getFileUCFieldBuilder();
            getAclFieldBuilder();
            getXAttrsFieldBuilder();
          }
        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          replication_ = 0;
          modificationTime_ = 0L;
          accessTime_ = 0L;
          preferredBlockSize_ = 0L;
          permission_ = 0L;
          if (blocksBuilder_ == null) {
            blocks_ = java.util.Collections.emptyList();
          } else {
            blocks_ = null;
            blocksBuilder_.clear();
          }
          bitField0_ = (bitField0_ & ~0x00000020);
          fileUC_ = null;
          if (fileUCBuilder_ != null) {
            fileUCBuilder_.dispose();
            fileUCBuilder_ = null;
          }
          acl_ = null;
          if (aclBuilder_ != null) {
            aclBuilder_.dispose();
            aclBuilder_ = null;
          }
          xAttrs_ = null;
          if (xAttrsBuilder_ != null) {
            xAttrsBuilder_.dispose();
            xAttrsBuilder_ = null;
          }
          storagePolicyID_ = 0;
          blockType_ = 0;
          erasureCodingPolicyID_ = 0;
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile(this);
          buildPartialRepeatedFields(result);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile result) {
          if (blocksBuilder_ == null) {
            if (((bitField0_ & 0x00000020) != 0)) {
              blocks_ = java.util.Collections.unmodifiableList(blocks_);
              bitField0_ = (bitField0_ & ~0x00000020);
            }
            result.blocks_ = blocks_;
          } else {
            result.blocks_ = blocksBuilder_.build();
          }
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile result) {
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) != 0)) {
            result.replication_ = replication_;
            to_bitField0_ |= 0x00000001;
          }
          if (((from_bitField0_ & 0x00000002) != 0)) {
            result.modificationTime_ = modificationTime_;
            to_bitField0_ |= 0x00000002;
          }
          if (((from_bitField0_ & 0x00000004) != 0)) {
            result.accessTime_ = accessTime_;
            to_bitField0_ |= 0x00000004;
          }
          if (((from_bitField0_ & 0x00000008) != 0)) {
            result.preferredBlockSize_ = preferredBlockSize_;
            to_bitField0_ |= 0x00000008;
          }
          if (((from_bitField0_ & 0x00000010) != 0)) {
            result.permission_ = permission_;
            to_bitField0_ |= 0x00000010;
          }
          if (((from_bitField0_ & 0x00000040) != 0)) {
            result.fileUC_ = fileUCBuilder_ == null
                ? fileUC_
                : fileUCBuilder_.build();
            to_bitField0_ |= 0x00000020;
          }
          if (((from_bitField0_ & 0x00000080) != 0)) {
            result.acl_ = aclBuilder_ == null
                ? acl_
                : aclBuilder_.build();
            to_bitField0_ |= 0x00000040;
          }
          if (((from_bitField0_ & 0x00000100) != 0)) {
            result.xAttrs_ = xAttrsBuilder_ == null
                ? xAttrs_
                : xAttrsBuilder_.build();
            to_bitField0_ |= 0x00000080;
          }
          if (((from_bitField0_ & 0x00000200) != 0)) {
            result.storagePolicyID_ = storagePolicyID_;
            to_bitField0_ |= 0x00000100;
          }
          if (((from_bitField0_ & 0x00000400) != 0)) {
            result.blockType_ = blockType_;
            to_bitField0_ |= 0x00000200;
          }
          if (((from_bitField0_ & 0x00000800) != 0)) {
            result.erasureCodingPolicyID_ = erasureCodingPolicyID_;
            to_bitField0_ |= 0x00000400;
          }
          result.bitField0_ |= to_bitField0_;
        }
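        // Note on the masks above: the builder reserves bit 0x20 for the repeated
        // `blocks` list, but repeated fields carry no has-bit in the built message, so
        // every later field shifts down one position when copied into result.bitField0_
        // (builder 0x40 -> message 0x20 for fileUC, 0x80 -> 0x40 for acl, and so on).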

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance()) return this;
          if (other.hasReplication()) {
            setReplication(other.getReplication());
          }
          if (other.hasModificationTime()) {
            setModificationTime(other.getModificationTime());
          }
          if (other.hasAccessTime()) {
            setAccessTime(other.getAccessTime());
          }
          if (other.hasPreferredBlockSize()) {
            setPreferredBlockSize(other.getPreferredBlockSize());
          }
          if (other.hasPermission()) {
            setPermission(other.getPermission());
          }
          if (blocksBuilder_ == null) {
            if (!other.blocks_.isEmpty()) {
              if (blocks_.isEmpty()) {
                blocks_ = other.blocks_;
                bitField0_ = (bitField0_ & ~0x00000020);
              } else {
                ensureBlocksIsMutable();
                blocks_.addAll(other.blocks_);
              }
              onChanged();
            }
          } else {
            if (!other.blocks_.isEmpty()) {
              if (blocksBuilder_.isEmpty()) {
                blocksBuilder_.dispose();
                blocksBuilder_ = null;
                blocks_ = other.blocks_;
                bitField0_ = (bitField0_ & ~0x00000020);
                blocksBuilder_ = 
                  org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                     getBlocksFieldBuilder() : null;
              } else {
                blocksBuilder_.addAllMessages(other.blocks_);
              }
            }
          }
          if (other.hasFileUC()) {
            mergeFileUC(other.getFileUC());
          }
          if (other.hasAcl()) {
            mergeAcl(other.getAcl());
          }
          if (other.hasXAttrs()) {
            mergeXAttrs(other.getXAttrs());
          }
          if (other.hasStoragePolicyID()) {
            setStoragePolicyID(other.getStoragePolicyID());
          }
          if (other.hasBlockType()) {
            setBlockType(other.getBlockType());
          }
          if (other.hasErasureCodingPolicyID()) {
            setErasureCodingPolicyID(other.getErasureCodingPolicyID());
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }

        @java.lang.Override
        public final boolean isInitialized() {
          for (int i = 0; i < getBlocksCount(); i++) {
            if (!getBlocks(i).isInitialized()) {
              return false;
            }
          }
          if (hasXAttrs()) {
            if (!getXAttrs().isInitialized()) {
              return false;
            }
          }
          return true;
        }
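        // isInitialized() only recurses into `blocks` and `xAttrs`: protoc emits these
        // checks solely for sub-message types that (transitively) declare required
        // fields, and since every field of INodeFile itself is optional, nothing else
        // can leave the message uninitialized.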

        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 8: {
                  replication_ = input.readUInt32();
                  bitField0_ |= 0x00000001;
                  break;
                } // case 8
                case 16: {
                  modificationTime_ = input.readUInt64();
                  bitField0_ |= 0x00000002;
                  break;
                } // case 16
                case 24: {
                  accessTime_ = input.readUInt64();
                  bitField0_ |= 0x00000004;
                  break;
                } // case 24
                case 32: {
                  preferredBlockSize_ = input.readUInt64();
                  bitField0_ |= 0x00000008;
                  break;
                } // case 32
                case 41: {
                  permission_ = input.readFixed64();
                  bitField0_ |= 0x00000010;
                  break;
                } // case 41
                case 50: {
                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto m =
                      input.readMessage(
                          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.PARSER,
                          extensionRegistry);
                  if (blocksBuilder_ == null) {
                    ensureBlocksIsMutable();
                    blocks_.add(m);
                  } else {
                    blocksBuilder_.addMessage(m);
                  }
                  break;
                } // case 50
                case 58: {
                  input.readMessage(
                      getFileUCFieldBuilder().getBuilder(),
                      extensionRegistry);
                  bitField0_ |= 0x00000040;
                  break;
                } // case 58
                case 66: {
                  input.readMessage(
                      getAclFieldBuilder().getBuilder(),
                      extensionRegistry);
                  bitField0_ |= 0x00000080;
                  break;
                } // case 66
                case 74: {
                  input.readMessage(
                      getXAttrsFieldBuilder().getBuilder(),
                      extensionRegistry);
                  bitField0_ |= 0x00000100;
                  break;
                } // case 74
                case 80: {
                  storagePolicyID_ = input.readUInt32();
                  bitField0_ |= 0x00000200;
                  break;
                } // case 80
                case 88: {
                  int tmpRaw = input.readEnum();
                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto tmpValue =
                      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto.forNumber(tmpRaw);
                  if (tmpValue == null) {
                    mergeUnknownVarintField(11, tmpRaw);
                  } else {
                    blockType_ = tmpRaw;
                    bitField0_ |= 0x00000400;
                  }
                  break;
                } // case 88
                case 96: {
                  erasureCodingPolicyID_ = input.readUInt32();
                  bitField0_ |= 0x00000800;
                  break;
                } // case 96
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
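        // The case labels above are protobuf wire tags, tag = (fieldNumber << 3) | wireType:
        // field 1 as a varint is (1 << 3) | 0 = 8, field 5 as a fixed64 is (5 << 3) | 1 = 41,
        // and field 6 as a length-delimited message is (6 << 3) | 2 = 50.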
        private int bitField0_;

        private int replication_ ;
        /**
         * <code>optional uint32 replication = 1;</code>
         * @return Whether the replication field is set.
         */
        @java.lang.Override
        public boolean hasReplication() {
          return ((bitField0_ & 0x00000001) != 0);
        }
        /**
         * <code>optional uint32 replication = 1;</code>
         * @return The replication.
         */
        @java.lang.Override
        public int getReplication() {
          return replication_;
        }
        /**
         * <code>optional uint32 replication = 1;</code>
         * @param value The replication to set.
         * @return This builder for chaining.
         */
        public Builder setReplication(int value) {

          replication_ = value;
          bitField0_ |= 0x00000001;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint32 replication = 1;</code>
         * @return This builder for chaining.
         */
        public Builder clearReplication() {
          bitField0_ = (bitField0_ & ~0x00000001);
          replication_ = 0;
          onChanged();
          return this;
        }

        private long modificationTime_ ;
        /**
         * <code>optional uint64 modificationTime = 2;</code>
         * @return Whether the modificationTime field is set.
         */
        @java.lang.Override
        public boolean hasModificationTime() {
          return ((bitField0_ & 0x00000002) != 0);
        }
        /**
         * <code>optional uint64 modificationTime = 2;</code>
         * @return The modificationTime.
         */
        @java.lang.Override
        public long getModificationTime() {
          return modificationTime_;
        }
        /**
         * <code>optional uint64 modificationTime = 2;</code>
         * @param value The modificationTime to set.
         * @return This builder for chaining.
         */
        public Builder setModificationTime(long value) {

          modificationTime_ = value;
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 modificationTime = 2;</code>
         * @return This builder for chaining.
         */
        public Builder clearModificationTime() {
          bitField0_ = (bitField0_ & ~0x00000002);
          modificationTime_ = 0L;
          onChanged();
          return this;
        }

        private long accessTime_ ;
        /**
         * <code>optional uint64 accessTime = 3;</code>
         * @return Whether the accessTime field is set.
         */
        @java.lang.Override
        public boolean hasAccessTime() {
          return ((bitField0_ & 0x00000004) != 0);
        }
        /**
         * <code>optional uint64 accessTime = 3;</code>
         * @return The accessTime.
         */
        @java.lang.Override
        public long getAccessTime() {
          return accessTime_;
        }
        /**
         * <code>optional uint64 accessTime = 3;</code>
         * @param value The accessTime to set.
         * @return This builder for chaining.
         */
        public Builder setAccessTime(long value) {

          accessTime_ = value;
          bitField0_ |= 0x00000004;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 accessTime = 3;</code>
         * @return This builder for chaining.
         */
        public Builder clearAccessTime() {
          bitField0_ = (bitField0_ & ~0x00000004);
          accessTime_ = 0L;
          onChanged();
          return this;
        }

        private long preferredBlockSize_ ;
        /**
         * <code>optional uint64 preferredBlockSize = 4;</code>
         * @return Whether the preferredBlockSize field is set.
         */
        @java.lang.Override
        public boolean hasPreferredBlockSize() {
          return ((bitField0_ & 0x00000008) != 0);
        }
        /**
         * <code>optional uint64 preferredBlockSize = 4;</code>
         * @return The preferredBlockSize.
         */
        @java.lang.Override
        public long getPreferredBlockSize() {
          return preferredBlockSize_;
        }
        /**
         * <code>optional uint64 preferredBlockSize = 4;</code>
         * @param value The preferredBlockSize to set.
         * @return This builder for chaining.
         */
        public Builder setPreferredBlockSize(long value) {

          preferredBlockSize_ = value;
          bitField0_ |= 0x00000008;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 preferredBlockSize = 4;</code>
         * @return This builder for chaining.
         */
        public Builder clearPreferredBlockSize() {
          bitField0_ = (bitField0_ & ~0x00000008);
          preferredBlockSize_ = 0L;
          onChanged();
          return this;
        }

        private long permission_ ;
        /**
         * <code>optional fixed64 permission = 5;</code>
         * @return Whether the permission field is set.
         */
        @java.lang.Override
        public boolean hasPermission() {
          return ((bitField0_ & 0x00000010) != 0);
        }
        /**
         * <code>optional fixed64 permission = 5;</code>
         * @return The permission.
         */
        @java.lang.Override
        public long getPermission() {
          return permission_;
        }
        /**
         * <code>optional fixed64 permission = 5;</code>
         * @param value The permission to set.
         * @return This builder for chaining.
         */
        public Builder setPermission(long value) {

          permission_ = value;
          bitField0_ |= 0x00000010;
          onChanged();
          return this;
        }
        /**
         * <code>optional fixed64 permission = 5;</code>
         * @return This builder for chaining.
         */
        public Builder clearPermission() {
          bitField0_ = (bitField0_ & ~0x00000010);
          permission_ = 0L;
          onChanged();
          return this;
        }

        private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> blocks_ =
          java.util.Collections.emptyList();
        private void ensureBlocksIsMutable() {
          if (!((bitField0_ & 0x00000020) != 0)) {
            blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto>(blocks_);
            bitField0_ |= 0x00000020;
          }
        }

        private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blocksBuilder_;

        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
         */
        public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> getBlocksList() {
          if (blocksBuilder_ == null) {
            return java.util.Collections.unmodifiableList(blocks_);
          } else {
            return blocksBuilder_.getMessageList();
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
         */
        public int getBlocksCount() {
          if (blocksBuilder_ == null) {
            return blocks_.size();
          } else {
            return blocksBuilder_.getCount();
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
         */
        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) {
          if (blocksBuilder_ == null) {
            return blocks_.get(index);
          } else {
            return blocksBuilder_.getMessage(index);
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
         */
        public Builder setBlocks(
            int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
          if (blocksBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            ensureBlocksIsMutable();
            blocks_.set(index, value);
            onChanged();
          } else {
            blocksBuilder_.setMessage(index, value);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
         */
        public Builder setBlocks(
            int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
          if (blocksBuilder_ == null) {
            ensureBlocksIsMutable();
            blocks_.set(index, builderForValue.build());
            onChanged();
          } else {
            blocksBuilder_.setMessage(index, builderForValue.build());
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
         */
        public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
          if (blocksBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            ensureBlocksIsMutable();
            blocks_.add(value);
            onChanged();
          } else {
            blocksBuilder_.addMessage(value);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
         */
        public Builder addBlocks(
            int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
          if (blocksBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            ensureBlocksIsMutable();
            blocks_.add(index, value);
            onChanged();
          } else {
            blocksBuilder_.addMessage(index, value);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
         */
        public Builder addBlocks(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
          if (blocksBuilder_ == null) {
            ensureBlocksIsMutable();
            blocks_.add(builderForValue.build());
            onChanged();
          } else {
            blocksBuilder_.addMessage(builderForValue.build());
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
         */
        public Builder addBlocks(
            int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
          if (blocksBuilder_ == null) {
            ensureBlocksIsMutable();
            blocks_.add(index, builderForValue.build());
            onChanged();
          } else {
            blocksBuilder_.addMessage(index, builderForValue.build());
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
         */
        public Builder addAllBlocks(
            java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> values) {
          if (blocksBuilder_ == null) {
            ensureBlocksIsMutable();
            org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
                values, blocks_);
            onChanged();
          } else {
            blocksBuilder_.addAllMessages(values);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
         */
        public Builder clearBlocks() {
          if (blocksBuilder_ == null) {
            blocks_ = java.util.Collections.emptyList();
            bitField0_ = (bitField0_ & ~0x00000020);
            onChanged();
          } else {
            blocksBuilder_.clear();
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
         */
        public Builder removeBlocks(int index) {
          if (blocksBuilder_ == null) {
            ensureBlocksIsMutable();
            blocks_.remove(index);
            onChanged();
          } else {
            blocksBuilder_.remove(index);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
         */
        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlocksBuilder(
            int index) {
          return getBlocksFieldBuilder().getBuilder(index);
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
         */
        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
            int index) {
          if (blocksBuilder_ == null) {
            return blocks_.get(index);
          } else {
            return blocksBuilder_.getMessageOrBuilder(index);
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
         */
        public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
             getBlocksOrBuilderList() {
          if (blocksBuilder_ != null) {
            return blocksBuilder_.getMessageOrBuilderList();
          } else {
            return java.util.Collections.unmodifiableList(blocks_);
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
         */
        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder() {
          return getBlocksFieldBuilder().addBuilder(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance());
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
         */
        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder(
            int index) {
          return getBlocksFieldBuilder().addBuilder(
              index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance());
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 6;</code>
         */
        public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder> 
             getBlocksBuilderList() {
          return getBlocksFieldBuilder().getBuilderList();
        }
        private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
            getBlocksFieldBuilder() {
          if (blocksBuilder_ == null) {
            blocksBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>(
                    blocks_,
                    ((bitField0_ & 0x00000020) != 0),
                    getParentForChildren(),
                    isClean());
            blocks_ = null;
          }
          return blocksBuilder_;
        }
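        // A minimal sketch of the repeated-field accessors above (`block`, `builder`, and
        // `existingFile` are placeholders, not names defined in this file):
        //
        //   builder.addBlocks(block);
        //   builder.addAllBlocks(existingFile.getBlocksList());
        //   HdfsProtos.BlockProto.Builder b = builder.addBlocksBuilder();
        //
        // addBlocksBuilder() and the other builder-view accessors lazily create
        // blocksBuilder_; once it exists, blocks_ is set to null and all further edits
        // go through that field builder.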

        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature fileUC_;
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeatureOrBuilder> fileUCBuilder_;
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
         * @return Whether the fileUC field is set.
         */
        public boolean hasFileUC() {
          return ((bitField0_ & 0x00000040) != 0);
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
         * @return The fileUC.
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature getFileUC() {
          if (fileUCBuilder_ == null) {
            return fileUC_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.getDefaultInstance() : fileUC_;
          } else {
            return fileUCBuilder_.getMessage();
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
         */
        public Builder setFileUC(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature value) {
          if (fileUCBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            fileUC_ = value;
          } else {
            fileUCBuilder_.setMessage(value);
          }
          bitField0_ |= 0x00000040;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
         */
        public Builder setFileUC(
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder builderForValue) {
          if (fileUCBuilder_ == null) {
            fileUC_ = builderForValue.build();
          } else {
            fileUCBuilder_.setMessage(builderForValue.build());
          }
          bitField0_ |= 0x00000040;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
         */
        public Builder mergeFileUC(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature value) {
          if (fileUCBuilder_ == null) {
            if (((bitField0_ & 0x00000040) != 0) &&
              fileUC_ != null &&
              fileUC_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.getDefaultInstance()) {
              getFileUCBuilder().mergeFrom(value);
            } else {
              fileUC_ = value;
            }
          } else {
            fileUCBuilder_.mergeFrom(value);
          }
          if (fileUC_ != null) {
            bitField0_ |= 0x00000040;
            onChanged();
          }
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
         */
        public Builder clearFileUC() {
          bitField0_ = (bitField0_ & ~0x00000040);
          fileUC_ = null;
          if (fileUCBuilder_ != null) {
            fileUCBuilder_.dispose();
            fileUCBuilder_ = null;
          }
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder getFileUCBuilder() {
          bitField0_ |= 0x00000040;
          onChanged();
          return getFileUCFieldBuilder().getBuilder();
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeatureOrBuilder getFileUCOrBuilder() {
          if (fileUCBuilder_ != null) {
            return fileUCBuilder_.getMessageOrBuilder();
          } else {
            return fileUC_ == null ?
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.getDefaultInstance() : fileUC_;
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;</code>
         */
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeatureOrBuilder> 
            getFileUCFieldBuilder() {
          if (fileUCBuilder_ == null) {
            fileUCBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeature.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.FileUnderConstructionFeatureOrBuilder>(
                    getFileUC(),
                    getParentForChildren(),
                    isClean());
            fileUC_ = null;
          }
          return fileUCBuilder_;
        }

        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto acl_;
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder> aclBuilder_;
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
         * @return Whether the acl field is set.
         */
        public boolean hasAcl() {
          return ((bitField0_ & 0x00000080) != 0);
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
         * @return The acl.
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getAcl() {
          if (aclBuilder_ == null) {
            return acl_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance() : acl_;
          } else {
            return aclBuilder_.getMessage();
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
         */
        public Builder setAcl(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto value) {
          if (aclBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            acl_ = value;
          } else {
            aclBuilder_.setMessage(value);
          }
          bitField0_ |= 0x00000080;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
         */
        public Builder setAcl(
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder builderForValue) {
          if (aclBuilder_ == null) {
            acl_ = builderForValue.build();
          } else {
            aclBuilder_.setMessage(builderForValue.build());
          }
          bitField0_ |= 0x00000080;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
         */
        public Builder mergeAcl(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto value) {
          if (aclBuilder_ == null) {
            if (((bitField0_ & 0x00000080) != 0) &&
              acl_ != null &&
              acl_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance()) {
              getAclBuilder().mergeFrom(value);
            } else {
              acl_ = value;
            }
          } else {
            aclBuilder_.mergeFrom(value);
          }
          if (acl_ != null) {
            bitField0_ |= 0x00000080;
            onChanged();
          }
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
         */
        public Builder clearAcl() {
          bitField0_ = (bitField0_ & ~0x00000080);
          acl_ = null;
          if (aclBuilder_ != null) {
            aclBuilder_.dispose();
            aclBuilder_ = null;
          }
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder getAclBuilder() {
          bitField0_ |= 0x00000080;
          onChanged();
          return getAclFieldBuilder().getBuilder();
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder getAclOrBuilder() {
          if (aclBuilder_ != null) {
            return aclBuilder_.getMessageOrBuilder();
          } else {
            return acl_ == null ?
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance() : acl_;
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;</code>
         */
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder> 
            getAclFieldBuilder() {
          if (aclBuilder_ == null) {
            aclBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder>(
                    getAcl(),
                    getParentForChildren(),
                    isClean());
            acl_ = null;
          }
          return aclBuilder_;
        }

        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto xAttrs_;
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder> xAttrsBuilder_;
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
         * @return Whether the xAttrs field is set.
         */
        public boolean hasXAttrs() {
          return ((bitField0_ & 0x00000100) != 0);
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
         * @return The xAttrs.
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getXAttrs() {
          if (xAttrsBuilder_ == null) {
            return xAttrs_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance() : xAttrs_;
          } else {
            return xAttrsBuilder_.getMessage();
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
         */
        public Builder setXAttrs(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto value) {
          if (xAttrsBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            xAttrs_ = value;
          } else {
            xAttrsBuilder_.setMessage(value);
          }
          bitField0_ |= 0x00000100;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
         */
        public Builder setXAttrs(
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder builderForValue) {
          if (xAttrsBuilder_ == null) {
            xAttrs_ = builderForValue.build();
          } else {
            xAttrsBuilder_.setMessage(builderForValue.build());
          }
          bitField0_ |= 0x00000100;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
         */
        public Builder mergeXAttrs(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto value) {
          if (xAttrsBuilder_ == null) {
            if (((bitField0_ & 0x00000100) != 0) &&
              xAttrs_ != null &&
              xAttrs_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance()) {
              getXAttrsBuilder().mergeFrom(value);
            } else {
              xAttrs_ = value;
            }
          } else {
            xAttrsBuilder_.mergeFrom(value);
          }
          if (xAttrs_ != null) {
            bitField0_ |= 0x00000100;
            onChanged();
          }
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
         */
        public Builder clearXAttrs() {
          bitField0_ = (bitField0_ & ~0x00000100);
          xAttrs_ = null;
          if (xAttrsBuilder_ != null) {
            xAttrsBuilder_.dispose();
            xAttrsBuilder_ = null;
          }
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder getXAttrsBuilder() {
          bitField0_ |= 0x00000100;
          onChanged();
          return getXAttrsFieldBuilder().getBuilder();
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder getXAttrsOrBuilder() {
          if (xAttrsBuilder_ != null) {
            return xAttrsBuilder_.getMessageOrBuilder();
          } else {
            return xAttrs_ == null ?
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance() : xAttrs_;
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 9;</code>
         */
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder> 
            getXAttrsFieldBuilder() {
          if (xAttrsBuilder_ == null) {
            xAttrsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder>(
                    getXAttrs(),
                    getParentForChildren(),
                    isClean());
            xAttrs_ = null;
          }
          return xAttrsBuilder_;
        }

        private int storagePolicyID_ ;
        /**
         * <code>optional uint32 storagePolicyID = 10;</code>
         * @return Whether the storagePolicyID field is set.
         */
        @java.lang.Override
        public boolean hasStoragePolicyID() {
          return ((bitField0_ & 0x00000200) != 0);
        }
        /**
         * <code>optional uint32 storagePolicyID = 10;</code>
         * @return The storagePolicyID.
         */
        @java.lang.Override
        public int getStoragePolicyID() {
          return storagePolicyID_;
        }
        /**
         * <code>optional uint32 storagePolicyID = 10;</code>
         * @param value The storagePolicyID to set.
         * @return This builder for chaining.
         */
        public Builder setStoragePolicyID(int value) {

          storagePolicyID_ = value;
          bitField0_ |= 0x00000200;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint32 storagePolicyID = 10;</code>
         * @return This builder for chaining.
         */
        public Builder clearStoragePolicyID() {
          bitField0_ = (bitField0_ & ~0x00000200);
          storagePolicyID_ = 0;
          onChanged();
          return this;
        }

        private int blockType_ = 0;
        /**
         * <code>optional .hadoop.hdfs.BlockTypeProto blockType = 11;</code>
         * @return Whether the blockType field is set.
         */
        @java.lang.Override public boolean hasBlockType() {
          return ((bitField0_ & 0x00000400) != 0);
        }
        /**
         * <code>optional .hadoop.hdfs.BlockTypeProto blockType = 11;</code>
         * @return The blockType.
         */
        @java.lang.Override
        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto getBlockType() {
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto.forNumber(blockType_);
          return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto.CONTIGUOUS : result;
        }
        /**
         * <code>optional .hadoop.hdfs.BlockTypeProto blockType = 11;</code>
         * @param value The blockType to set.
         * @return This builder for chaining.
         */
        public Builder setBlockType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto value) {
          if (value == null) {
            throw new NullPointerException();
          }
          bitField0_ |= 0x00000400;
          blockType_ = value.getNumber();
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.BlockTypeProto blockType = 11;</code>
         * @return This builder for chaining.
         */
        public Builder clearBlockType() {
          bitField0_ = (bitField0_ & ~0x00000400);
          blockType_ = 0;
          onChanged();
          return this;
        }

        private int erasureCodingPolicyID_ ;
        /**
         * <code>optional uint32 erasureCodingPolicyID = 12;</code>
         * @return Whether the erasureCodingPolicyID field is set.
         */
        @java.lang.Override
        public boolean hasErasureCodingPolicyID() {
          return ((bitField0_ & 0x00000800) != 0);
        }
        /**
         * <code>optional uint32 erasureCodingPolicyID = 12;</code>
         * @return The erasureCodingPolicyID.
         */
        @java.lang.Override
        public int getErasureCodingPolicyID() {
          return erasureCodingPolicyID_;
        }
        /**
         * <code>optional uint32 erasureCodingPolicyID = 12;</code>
         * @param value The erasureCodingPolicyID to set.
         * @return This builder for chaining.
         */
        public Builder setErasureCodingPolicyID(int value) {

          erasureCodingPolicyID_ = value;
          bitField0_ |= 0x00000800;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint32 erasureCodingPolicyID = 12;</code>
         * @return This builder for chaining.
         */
        public Builder clearErasureCodingPolicyID() {
          bitField0_ = (bitField0_ & ~0x00000800);
          erasureCodingPolicyID_ = 0;
          onChanged();
          return this;
        }
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.INodeFile)
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.INodeFile)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<INodeFile>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<INodeFile>() {
        @java.lang.Override
        public INodeFile parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<INodeFile> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<INodeFile> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }
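
    /*
     * Illustrative usage sketch (the numeric IDs are hypothetical placeholders, not policy IDs
     * defined by HDFS): building an INodeFile entry carrying the storagePolicyID, blockType and
     * erasureCodingPolicyID fields handled by the builder above. All three fields are declared
     * optional, so build() accepts any subset of them.
     *
     *   INodeSection.INodeFile file = INodeSection.INodeFile.newBuilder()
     *       .setStoragePolicyID(7)           // hypothetical storage policy ID
     *       .setBlockType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto.STRIPED)
     *       .setErasureCodingPolicyID(1)     // hypothetical erasure coding policy ID
     *       .build();
     *   // hasBlockType(), hasStoragePolicyID(), etc. report whether each optional field was set.
     */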

    public interface QuotaByStorageTypeEntryProtoOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <code>required .hadoop.hdfs.StorageTypeProto storageType = 1;</code>
       * @return Whether the storageType field is set.
       */
      boolean hasStorageType();
      /**
       * <code>required .hadoop.hdfs.StorageTypeProto storageType = 1;</code>
       * @return The storageType.
       */
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType();

      /**
       * <code>required uint64 quota = 2;</code>
       * @return Whether the quota field is set.
       */
      boolean hasQuota();
      /**
       * <code>required uint64 quota = 2;</code>
       * @return The quota.
       */
      long getQuota();
    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto}
     */
    public static final class QuotaByStorageTypeEntryProto extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto)
        QuotaByStorageTypeEntryProtoOrBuilder {
    private static final long serialVersionUID = 0L;
      // Use QuotaByStorageTypeEntryProto.newBuilder() to construct.
      private QuotaByStorageTypeEntryProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private QuotaByStorageTypeEntryProto() {
        storageType_ = 1;
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new QuotaByStorageTypeEntryProto();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder.class);
      }

      private int bitField0_;
      public static final int STORAGETYPE_FIELD_NUMBER = 1;
      private int storageType_ = 1;
      /**
       * <code>required .hadoop.hdfs.StorageTypeProto storageType = 1;</code>
       * @return Whether the storageType field is set.
       */
      @java.lang.Override public boolean hasStorageType() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.StorageTypeProto storageType = 1;</code>
       * @return The storageType.
       */
      @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(storageType_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
      }

      public static final int QUOTA_FIELD_NUMBER = 2;
      private long quota_ = 0L;
      /**
       * <code>required uint64 quota = 2;</code>
       * @return Whether the quota field is set.
       */
      @java.lang.Override
      public boolean hasQuota() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required uint64 quota = 2;</code>
       * @return The quota.
       */
      @java.lang.Override
      public long getQuota() {
        return quota_;
      }

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        if (!hasStorageType()) {
          memoizedIsInitialized = 0;
          return false;
        }
        if (!hasQuota()) {
          memoizedIsInitialized = 0;
          return false;
        }
        memoizedIsInitialized = 1;
        return true;
      }

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        if (((bitField0_ & 0x00000001) != 0)) {
          output.writeEnum(1, storageType_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          output.writeUInt64(2, quota_);
        }
        getUnknownFields().writeTo(output);
      }

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeEnumSize(1, storageType_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64Size(2, quota_);
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
          return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto) obj;

        if (hasStorageType() != other.hasStorageType()) return false;
        if (hasStorageType()) {
          if (storageType_ != other.storageType_) return false;
        }
        if (hasQuota() != other.hasQuota()) return false;
        if (hasQuota()) {
          if (getQuota()
              != other.getQuota()) return false;
        }
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (hasStorageType()) {
          hash = (37 * hash) + STORAGETYPE_FIELD_NUMBER;
          hash = (53 * hash) + storageType_;
        }
        if (hasQuota()) {
          hash = (37 * hash) + QUOTA_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getQuota());
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.newBuilder()
        private Builder() {

        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);

        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          storageType_ = 1;
          quota_ = 0L;
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto(this);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto result) {
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) != 0)) {
            result.storageType_ = storageType_;
            to_bitField0_ |= 0x00000001;
          }
          if (((from_bitField0_ & 0x00000002) != 0)) {
            result.quota_ = quota_;
            to_bitField0_ |= 0x00000002;
          }
          result.bitField0_ |= to_bitField0_;
        }

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.getDefaultInstance()) return this;
          if (other.hasStorageType()) {
            setStorageType(other.getStorageType());
          }
          if (other.hasQuota()) {
            setQuota(other.getQuota());
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }

        @java.lang.Override
        public final boolean isInitialized() {
          if (!hasStorageType()) {
            return false;
          }
          if (!hasQuota()) {
            return false;
          }
          return true;
        }

        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 8: {
                  int tmpRaw = input.readEnum();
                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue =
                      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw);
                  if (tmpValue == null) {
                    mergeUnknownVarintField(1, tmpRaw);
                  } else {
                    storageType_ = tmpRaw;
                    bitField0_ |= 0x00000001;
                  }
                  break;
                } // case 8
                case 16: {
                  quota_ = input.readUInt64();
                  bitField0_ |= 0x00000002;
                  break;
                } // case 16
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
        private int bitField0_;

        private int storageType_ = 1;
        /**
         * <code>required .hadoop.hdfs.StorageTypeProto storageType = 1;</code>
         * @return Whether the storageType field is set.
         */
        @java.lang.Override public boolean hasStorageType() {
          return ((bitField0_ & 0x00000001) != 0);
        }
        /**
         * <code>required .hadoop.hdfs.StorageTypeProto storageType = 1;</code>
         * @return The storageType.
         */
        @java.lang.Override
        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(storageType_);
          return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
        }
        /**
         * <code>required .hadoop.hdfs.StorageTypeProto storageType = 1;</code>
         * @param value The storageType to set.
         * @return This builder for chaining.
         */
        public Builder setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
          if (value == null) {
            throw new NullPointerException();
          }
          bitField0_ |= 0x00000001;
          storageType_ = value.getNumber();
          onChanged();
          return this;
        }
        /**
         * <code>required .hadoop.hdfs.StorageTypeProto storageType = 1;</code>
         * @return This builder for chaining.
         */
        public Builder clearStorageType() {
          bitField0_ = (bitField0_ & ~0x00000001);
          storageType_ = 1;
          onChanged();
          return this;
        }

        private long quota_ ;
        /**
         * <code>required uint64 quota = 2;</code>
         * @return Whether the quota field is set.
         */
        @java.lang.Override
        public boolean hasQuota() {
          return ((bitField0_ & 0x00000002) != 0);
        }
        /**
         * <code>required uint64 quota = 2;</code>
         * @return The quota.
         */
        @java.lang.Override
        public long getQuota() {
          return quota_;
        }
        /**
         * <code>required uint64 quota = 2;</code>
         * @param value The quota to set.
         * @return This builder for chaining.
         */
        public Builder setQuota(long value) {

          quota_ = value;
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        /**
         * <code>required uint64 quota = 2;</code>
         * @return This builder for chaining.
         */
        public Builder clearQuota() {
          bitField0_ = (bitField0_ & ~0x00000002);
          quota_ = 0L;
          onChanged();
          return this;
        }
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto)
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<QuotaByStorageTypeEntryProto>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<QuotaByStorageTypeEntryProto>() {
        @java.lang.Override
        public QuotaByStorageTypeEntryProto parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<QuotaByStorageTypeEntryProto> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<QuotaByStorageTypeEntryProto> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }
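
    /*
     * Illustrative usage sketch (sample values only): building and round-tripping a
     * QuotaByStorageTypeEntryProto. Both storageType and quota are required fields, so
     * build() throws if either one is unset.
     *
     *   INodeSection.QuotaByStorageTypeEntryProto entry =
     *       INodeSection.QuotaByStorageTypeEntryProto.newBuilder()
     *           .setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK)
     *           .setQuota(1024L * 1024L * 1024L)   // sample 1 GiB quota
     *           .build();
     *   byte[] bytes = entry.toByteArray();
     *   INodeSection.QuotaByStorageTypeEntryProto parsed =
     *       INodeSection.QuotaByStorageTypeEntryProto.parseFrom(bytes);
     *   // parsed.getStorageType() and parsed.getQuota() match the values set above.
     */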

    public interface QuotaByStorageTypeFeatureProtoOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
       */
      java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto> 
          getQuotasList();
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto getQuotas(int index);
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
       */
      int getQuotasCount();
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
       */
      java.util.List<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder> 
          getQuotasOrBuilderList();
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder getQuotasOrBuilder(
          int index);
    }
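
    /*
     * Illustrative usage sketch (diskEntry and archiveEntry are assumed to be previously built
     * QuotaByStorageTypeEntryProto instances): the feature message defined below is simply a
     * container for a repeated list of per-storage-type quota entries.
     *
     *   INodeSection.QuotaByStorageTypeFeatureProto feature =
     *       INodeSection.QuotaByStorageTypeFeatureProto.newBuilder()
     *           .addQuotas(diskEntry)        // one entry per storage type
     *           .addQuotas(archiveEntry)
     *           .build();
     *   for (INodeSection.QuotaByStorageTypeEntryProto q : feature.getQuotasList()) {
     *     // q.getStorageType(), q.getQuota()
     *   }
     */
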
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto}
     */
    public static final class QuotaByStorageTypeFeatureProto extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto)
        QuotaByStorageTypeFeatureProtoOrBuilder {
    private static final long serialVersionUID = 0L;
      // Use QuotaByStorageTypeFeatureProto.newBuilder() to construct.
      private QuotaByStorageTypeFeatureProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private QuotaByStorageTypeFeatureProto() {
        quotas_ = java.util.Collections.emptyList();
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new QuotaByStorageTypeFeatureProto();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder.class);
      }

      public static final int QUOTAS_FIELD_NUMBER = 1;
      @SuppressWarnings("serial")
      private java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto> quotas_;
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
       */
      @java.lang.Override
      public java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto> getQuotasList() {
        return quotas_;
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
       */
      @java.lang.Override
      public java.util.List<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder> 
          getQuotasOrBuilderList() {
        return quotas_;
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
       */
      @java.lang.Override
      public int getQuotasCount() {
        return quotas_.size();
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto getQuotas(int index) {
        return quotas_.get(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder getQuotasOrBuilder(
          int index) {
        return quotas_.get(index);
      }

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        for (int i = 0; i < getQuotasCount(); i++) {
          if (!getQuotas(i).isInitialized()) {
            memoizedIsInitialized = 0;
            return false;
          }
        }
        memoizedIsInitialized = 1;
        return true;
      }

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        for (int i = 0; i < quotas_.size(); i++) {
          output.writeMessage(1, quotas_.get(i));
        }
        getUnknownFields().writeTo(output);
      }

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        for (int i = 0; i < quotas_.size(); i++) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeMessageSize(1, quotas_.get(i));
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
          return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto) obj;

        if (!getQuotasList()
            .equals(other.getQuotasList())) return false;
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (getQuotasCount() > 0) {
          hash = (37 * hash) + QUOTAS_FIELD_NUMBER;
          hash = (53 * hash) + getQuotasList().hashCode();
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProtoOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.newBuilder()
        private Builder() {

        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);

        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          if (quotasBuilder_ == null) {
            quotas_ = java.util.Collections.emptyList();
          } else {
            quotas_ = null;
            quotasBuilder_.clear();
          }
          bitField0_ = (bitField0_ & ~0x00000001);
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto(this);
          buildPartialRepeatedFields(result);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto result) {
          if (quotasBuilder_ == null) {
            if (((bitField0_ & 0x00000001) != 0)) {
              quotas_ = java.util.Collections.unmodifiableList(quotas_);
              bitField0_ = (bitField0_ & ~0x00000001);
            }
            result.quotas_ = quotas_;
          } else {
            result.quotas_ = quotasBuilder_.build();
          }
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto result) {
          int from_bitField0_ = bitField0_;
        }

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.getDefaultInstance()) return this;
          if (quotasBuilder_ == null) {
            if (!other.quotas_.isEmpty()) {
              if (quotas_.isEmpty()) {
                quotas_ = other.quotas_;
                bitField0_ = (bitField0_ & ~0x00000001);
              } else {
                ensureQuotasIsMutable();
                quotas_.addAll(other.quotas_);
              }
              onChanged();
            }
          } else {
            if (!other.quotas_.isEmpty()) {
              if (quotasBuilder_.isEmpty()) {
                quotasBuilder_.dispose();
                quotasBuilder_ = null;
                quotas_ = other.quotas_;
                bitField0_ = (bitField0_ & ~0x00000001);
                quotasBuilder_ = 
                  org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                     getQuotasFieldBuilder() : null;
              } else {
                quotasBuilder_.addAllMessages(other.quotas_);
              }
            }
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }

        @java.lang.Override
        public final boolean isInitialized() {
          for (int i = 0; i < getQuotasCount(); i++) {
            if (!getQuotas(i).isInitialized()) {
              return false;
            }
          }
          return true;
        }

        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 10: {
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto m =
                      input.readMessage(
                          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.PARSER,
                          extensionRegistry);
                  if (quotasBuilder_ == null) {
                    ensureQuotasIsMutable();
                    quotas_.add(m);
                  } else {
                    quotasBuilder_.addMessage(m);
                  }
                  break;
                } // case 10
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
        private int bitField0_;

        private java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto> quotas_ =
          java.util.Collections.emptyList();
        private void ensureQuotasIsMutable() {
          if (!((bitField0_ & 0x00000001) != 0)) {
            quotas_ = new java.util.ArrayList<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto>(quotas_);
            bitField0_ |= 0x00000001;
          }
        }

        private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder> quotasBuilder_;

        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto> getQuotasList() {
          if (quotasBuilder_ == null) {
            return java.util.Collections.unmodifiableList(quotas_);
          } else {
            return quotasBuilder_.getMessageList();
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public int getQuotasCount() {
          if (quotasBuilder_ == null) {
            return quotas_.size();
          } else {
            return quotasBuilder_.getCount();
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto getQuotas(int index) {
          if (quotasBuilder_ == null) {
            return quotas_.get(index);
          } else {
            return quotasBuilder_.getMessage(index);
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public Builder setQuotas(
            int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto value) {
          if (quotasBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            ensureQuotasIsMutable();
            quotas_.set(index, value);
            onChanged();
          } else {
            quotasBuilder_.setMessage(index, value);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public Builder setQuotas(
            int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder builderForValue) {
          if (quotasBuilder_ == null) {
            ensureQuotasIsMutable();
            quotas_.set(index, builderForValue.build());
            onChanged();
          } else {
            quotasBuilder_.setMessage(index, builderForValue.build());
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public Builder addQuotas(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto value) {
          if (quotasBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            ensureQuotasIsMutable();
            quotas_.add(value);
            onChanged();
          } else {
            quotasBuilder_.addMessage(value);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public Builder addQuotas(
            int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto value) {
          if (quotasBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            ensureQuotasIsMutable();
            quotas_.add(index, value);
            onChanged();
          } else {
            quotasBuilder_.addMessage(index, value);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public Builder addQuotas(
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder builderForValue) {
          if (quotasBuilder_ == null) {
            ensureQuotasIsMutable();
            quotas_.add(builderForValue.build());
            onChanged();
          } else {
            quotasBuilder_.addMessage(builderForValue.build());
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public Builder addQuotas(
            int index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder builderForValue) {
          if (quotasBuilder_ == null) {
            ensureQuotasIsMutable();
            quotas_.add(index, builderForValue.build());
            onChanged();
          } else {
            quotasBuilder_.addMessage(index, builderForValue.build());
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public Builder addAllQuotas(
            java.lang.Iterable<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto> values) {
          if (quotasBuilder_ == null) {
            ensureQuotasIsMutable();
            org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
                values, quotas_);
            onChanged();
          } else {
            quotasBuilder_.addAllMessages(values);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public Builder clearQuotas() {
          if (quotasBuilder_ == null) {
            quotas_ = java.util.Collections.emptyList();
            bitField0_ = (bitField0_ & ~0x00000001);
            onChanged();
          } else {
            quotasBuilder_.clear();
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public Builder removeQuotas(int index) {
          if (quotasBuilder_ == null) {
            ensureQuotasIsMutable();
            quotas_.remove(index);
            onChanged();
          } else {
            quotasBuilder_.remove(index);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder getQuotasBuilder(
            int index) {
          return getQuotasFieldBuilder().getBuilder(index);
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder getQuotasOrBuilder(
            int index) {
          if (quotasBuilder_ == null) {
            return quotas_.get(index);
          } else {
            return quotasBuilder_.getMessageOrBuilder(index);
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public java.util.List<? extends org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder> 
             getQuotasOrBuilderList() {
          if (quotasBuilder_ != null) {
            return quotasBuilder_.getMessageOrBuilderList();
          } else {
            return java.util.Collections.unmodifiableList(quotas_);
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder addQuotasBuilder() {
          return getQuotasFieldBuilder().addBuilder(
              org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.getDefaultInstance());
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder addQuotasBuilder(
            int index) {
          return getQuotasFieldBuilder().addBuilder(
              index, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.getDefaultInstance());
        }
        /**
         * <code>repeated .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeEntryProto quotas = 1;</code>
         */
        public java.util.List<org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder> 
             getQuotasBuilderList() {
          return getQuotasFieldBuilder().getBuilderList();
        }
        private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder> 
            getQuotasFieldBuilder() {
          if (quotasBuilder_ == null) {
            quotasBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProtoOrBuilder>(
                    quotas_,
                    ((bitField0_ & 0x00000001) != 0),
                    getParentForChildren(),
                    isClean());
            quotas_ = null;
          }
          return quotasBuilder_;
        }
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto)
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<QuotaByStorageTypeFeatureProto>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<QuotaByStorageTypeFeatureProto>() {
        @java.lang.Override
        public QuotaByStorageTypeFeatureProto parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<QuotaByStorageTypeFeatureProto> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<QuotaByStorageTypeFeatureProto> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }
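    // Usage sketch (not part of the generated code): the repeated "quotas"
    // field can be populated through addQuotasBuilder(), which appends a new
    // QuotaByStorageTypeEntryProto and returns its nested builder. Any
    // required fields of the entry message must still be filled in on that
    // nested builder before build() will succeed.
    //
    //   FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder fb =
    //       FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.newBuilder();
    //   FsImageProto.INodeSection.QuotaByStorageTypeEntryProto.Builder entry =
    //       fb.addQuotasBuilder();            // appends entry #0 and returns its builder
    //   // ... set the entry's fields here before building ...
    //   assert fb.getQuotasCount() == 1;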

    public interface INodeDirectoryOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection.INodeDirectory)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <code>optional uint64 modificationTime = 1;</code>
       * @return Whether the modificationTime field is set.
       */
      boolean hasModificationTime();
      /**
       * <code>optional uint64 modificationTime = 1;</code>
       * @return The modificationTime.
       */
      long getModificationTime();

      /**
       * <pre>
       * namespace quota
       * </pre>
       *
       * <code>optional uint64 nsQuota = 2;</code>
       * @return Whether the nsQuota field is set.
       */
      boolean hasNsQuota();
      /**
       * <pre>
       * namespace quota
       * </pre>
       *
       * <code>optional uint64 nsQuota = 2;</code>
       * @return The nsQuota.
       */
      long getNsQuota();

      /**
       * <pre>
       * diskspace quota
       * </pre>
       *
       * <code>optional uint64 dsQuota = 3;</code>
       * @return Whether the dsQuota field is set.
       */
      boolean hasDsQuota();
      /**
       * <pre>
       * diskspace quota
       * </pre>
       *
       * <code>optional uint64 dsQuota = 3;</code>
       * @return The dsQuota.
       */
      long getDsQuota();

      /**
       * <code>optional fixed64 permission = 4;</code>
       * @return Whether the permission field is set.
       */
      boolean hasPermission();
      /**
       * <code>optional fixed64 permission = 4;</code>
       * @return The permission.
       */
      long getPermission();

      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
       * @return Whether the acl field is set.
       */
      boolean hasAcl();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
       * @return The acl.
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getAcl();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder getAclOrBuilder();

      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
       * @return Whether the xAttrs field is set.
       */
      boolean hasXAttrs();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
       * @return The xAttrs.
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getXAttrs();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder getXAttrsOrBuilder();

      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
       * @return Whether the typeQuotas field is set.
       */
      boolean hasTypeQuotas();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
       * @return The typeQuotas.
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto getTypeQuotas();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProtoOrBuilder getTypeQuotasOrBuilder();
    }
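    // Usage sketch (not part of the generated code): every optional field
    // declared above follows the generated has*/get* pair, so callers should
    // test presence first; the getter returns the field's default value when
    // the field is unset.
    //
    //   static void printQuotas(FsImageProto.INodeSection.INodeDirectoryOrBuilder dir) {
    //     if (dir.hasNsQuota()) {
    //       System.out.println("nsQuota = " + dir.getNsQuota());
    //     }
    //     if (dir.hasDsQuota()) {
    //       System.out.println("dsQuota = " + dir.getDsQuota());
    //     }
    //   }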
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INodeDirectory}
     */
    public static final class INodeDirectory extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection.INodeDirectory)
        INodeDirectoryOrBuilder {
    private static final long serialVersionUID = 0L;
      // Use INodeDirectory.newBuilder() to construct.
      private INodeDirectory(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private INodeDirectory() {
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new INodeDirectory();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder.class);
      }

      private int bitField0_;
      public static final int MODIFICATIONTIME_FIELD_NUMBER = 1;
      private long modificationTime_ = 0L;
      /**
       * <code>optional uint64 modificationTime = 1;</code>
       * @return Whether the modificationTime field is set.
       */
      @java.lang.Override
      public boolean hasModificationTime() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional uint64 modificationTime = 1;</code>
       * @return The modificationTime.
       */
      @java.lang.Override
      public long getModificationTime() {
        return modificationTime_;
      }

      public static final int NSQUOTA_FIELD_NUMBER = 2;
      private long nsQuota_ = 0L;
      /**
       * <pre>
       * namespace quota
       * </pre>
       *
       * <code>optional uint64 nsQuota = 2;</code>
       * @return Whether the nsQuota field is set.
       */
      @java.lang.Override
      public boolean hasNsQuota() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       * namespace quota
       * </pre>
       *
       * <code>optional uint64 nsQuota = 2;</code>
       * @return The nsQuota.
       */
      @java.lang.Override
      public long getNsQuota() {
        return nsQuota_;
      }

      public static final int DSQUOTA_FIELD_NUMBER = 3;
      private long dsQuota_ = 0L;
      /**
       * <pre>
       * diskspace quota
       * </pre>
       *
       * <code>optional uint64 dsQuota = 3;</code>
       * @return Whether the dsQuota field is set.
       */
      @java.lang.Override
      public boolean hasDsQuota() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <pre>
       * diskspace quota
       * </pre>
       *
       * <code>optional uint64 dsQuota = 3;</code>
       * @return The dsQuota.
       */
      @java.lang.Override
      public long getDsQuota() {
        return dsQuota_;
      }

      public static final int PERMISSION_FIELD_NUMBER = 4;
      private long permission_ = 0L;
      /**
       * <code>optional fixed64 permission = 4;</code>
       * @return Whether the permission field is set.
       */
      @java.lang.Override
      public boolean hasPermission() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional fixed64 permission = 4;</code>
       * @return The permission.
       */
      @java.lang.Override
      public long getPermission() {
        return permission_;
      }

      public static final int ACL_FIELD_NUMBER = 5;
      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto acl_;
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
       * @return Whether the acl field is set.
       */
      @java.lang.Override
      public boolean hasAcl() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
       * @return The acl.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getAcl() {
        return acl_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance() : acl_;
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder getAclOrBuilder() {
        return acl_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance() : acl_;
      }

      public static final int XATTRS_FIELD_NUMBER = 6;
      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto xAttrs_;
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
       * @return Whether the xAttrs field is set.
       */
      @java.lang.Override
      public boolean hasXAttrs() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
       * @return The xAttrs.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getXAttrs() {
        return xAttrs_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance() : xAttrs_;
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder getXAttrsOrBuilder() {
        return xAttrs_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance() : xAttrs_;
      }

      public static final int TYPEQUOTAS_FIELD_NUMBER = 7;
      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas_;
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
       * @return Whether the typeQuotas field is set.
       */
      @java.lang.Override
      public boolean hasTypeQuotas() {
        return ((bitField0_ & 0x00000040) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
       * @return The typeQuotas.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto getTypeQuotas() {
        return typeQuotas_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.getDefaultInstance() : typeQuotas_;
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProtoOrBuilder getTypeQuotasOrBuilder() {
        return typeQuotas_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.getDefaultInstance() : typeQuotas_;
      }

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        if (hasXAttrs()) {
          if (!getXAttrs().isInitialized()) {
            memoizedIsInitialized = 0;
            return false;
          }
        }
        if (hasTypeQuotas()) {
          if (!getTypeQuotas().isInitialized()) {
            memoizedIsInitialized = 0;
            return false;
          }
        }
        memoizedIsInitialized = 1;
        return true;
      }

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        if (((bitField0_ & 0x00000001) != 0)) {
          output.writeUInt64(1, modificationTime_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          output.writeUInt64(2, nsQuota_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          output.writeUInt64(3, dsQuota_);
        }
        if (((bitField0_ & 0x00000008) != 0)) {
          output.writeFixed64(4, permission_);
        }
        if (((bitField0_ & 0x00000010) != 0)) {
          output.writeMessage(5, getAcl());
        }
        if (((bitField0_ & 0x00000020) != 0)) {
          output.writeMessage(6, getXAttrs());
        }
        if (((bitField0_ & 0x00000040) != 0)) {
          output.writeMessage(7, getTypeQuotas());
        }
        getUnknownFields().writeTo(output);
      }

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64Size(1, modificationTime_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64Size(2, nsQuota_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64Size(3, dsQuota_);
        }
        if (((bitField0_ & 0x00000008) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeFixed64Size(4, permission_);
        }
        if (((bitField0_ & 0x00000010) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeMessageSize(5, getAcl());
        }
        if (((bitField0_ & 0x00000020) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeMessageSize(6, getXAttrs());
        }
        if (((bitField0_ & 0x00000040) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeMessageSize(7, getTypeQuotas());
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
          return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory) obj;

        if (hasModificationTime() != other.hasModificationTime()) return false;
        if (hasModificationTime()) {
          if (getModificationTime()
              != other.getModificationTime()) return false;
        }
        if (hasNsQuota() != other.hasNsQuota()) return false;
        if (hasNsQuota()) {
          if (getNsQuota()
              != other.getNsQuota()) return false;
        }
        if (hasDsQuota() != other.hasDsQuota()) return false;
        if (hasDsQuota()) {
          if (getDsQuota()
              != other.getDsQuota()) return false;
        }
        if (hasPermission() != other.hasPermission()) return false;
        if (hasPermission()) {
          if (getPermission()
              != other.getPermission()) return false;
        }
        if (hasAcl() != other.hasAcl()) return false;
        if (hasAcl()) {
          if (!getAcl()
              .equals(other.getAcl())) return false;
        }
        if (hasXAttrs() != other.hasXAttrs()) return false;
        if (hasXAttrs()) {
          if (!getXAttrs()
              .equals(other.getXAttrs())) return false;
        }
        if (hasTypeQuotas() != other.hasTypeQuotas()) return false;
        if (hasTypeQuotas()) {
          if (!getTypeQuotas()
              .equals(other.getTypeQuotas())) return false;
        }
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (hasModificationTime()) {
          hash = (37 * hash) + MODIFICATIONTIME_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getModificationTime());
        }
        if (hasNsQuota()) {
          hash = (37 * hash) + NSQUOTA_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getNsQuota());
        }
        if (hasDsQuota()) {
          hash = (37 * hash) + DSQUOTA_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getDsQuota());
        }
        if (hasPermission()) {
          hash = (37 * hash) + PERMISSION_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getPermission());
        }
        if (hasAcl()) {
          hash = (37 * hash) + ACL_FIELD_NUMBER;
          hash = (53 * hash) + getAcl().hashCode();
        }
        if (hasXAttrs()) {
          hash = (37 * hash) + XATTRS_FIELD_NUMBER;
          hash = (53 * hash) + getXAttrs().hashCode();
        }
        if (hasTypeQuotas()) {
          hash = (37 * hash) + TYPEQUOTAS_FIELD_NUMBER;
          hash = (53 * hash) + getTypeQuotas().hashCode();
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }
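      // Usage sketch (not part of the generated code): a serialized
      // INodeDirectory can be round-tripped through any of the parseFrom
      // overloads above; parsing throws InvalidProtocolBufferException if the
      // bytes are malformed or a required field of a nested message is missing.
      //
      //   byte[] bytes = FsImageProto.INodeSection.INodeDirectory.newBuilder()
      //       .setModificationTime(System.currentTimeMillis())
      //       .build()
      //       .toByteArray();
      //   FsImageProto.INodeSection.INodeDirectory parsed =
      //       FsImageProto.INodeSection.INodeDirectory.parseFrom(bytes);
      //   assert parsed.hasModificationTime();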

      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
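      // Usage sketch (not part of the generated code): messages are immutable,
      // so modifying an existing INodeDirectory goes through toBuilder(), which
      // copies its fields into a fresh Builder.
      //
      //   // "existingDir" is a hypothetical in-scope INodeDirectory instance.
      //   FsImageProto.INodeSection.INodeDirectory updated = existingDir.toBuilder()
      //       .setNsQuota(100000L)
      //       .build();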
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INodeDirectory}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection.INodeDirectory)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.newBuilder()
        private Builder() {
          maybeForceBuilderInitialization();
        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);
          maybeForceBuilderInitialization();
        }
        private void maybeForceBuilderInitialization() {
          if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                  .alwaysUseFieldBuilders) {
            getAclFieldBuilder();
            getXAttrsFieldBuilder();
            getTypeQuotasFieldBuilder();
          }
        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          modificationTime_ = 0L;
          nsQuota_ = 0L;
          dsQuota_ = 0L;
          permission_ = 0L;
          acl_ = null;
          if (aclBuilder_ != null) {
            aclBuilder_.dispose();
            aclBuilder_ = null;
          }
          xAttrs_ = null;
          if (xAttrsBuilder_ != null) {
            xAttrsBuilder_.dispose();
            xAttrsBuilder_ = null;
          }
          typeQuotas_ = null;
          if (typeQuotasBuilder_ != null) {
            typeQuotasBuilder_.dispose();
            typeQuotasBuilder_ = null;
          }
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory(this);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory result) {
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) != 0)) {
            result.modificationTime_ = modificationTime_;
            to_bitField0_ |= 0x00000001;
          }
          if (((from_bitField0_ & 0x00000002) != 0)) {
            result.nsQuota_ = nsQuota_;
            to_bitField0_ |= 0x00000002;
          }
          if (((from_bitField0_ & 0x00000004) != 0)) {
            result.dsQuota_ = dsQuota_;
            to_bitField0_ |= 0x00000004;
          }
          if (((from_bitField0_ & 0x00000008) != 0)) {
            result.permission_ = permission_;
            to_bitField0_ |= 0x00000008;
          }
          if (((from_bitField0_ & 0x00000010) != 0)) {
            result.acl_ = aclBuilder_ == null
                ? acl_
                : aclBuilder_.build();
            to_bitField0_ |= 0x00000010;
          }
          if (((from_bitField0_ & 0x00000020) != 0)) {
            result.xAttrs_ = xAttrsBuilder_ == null
                ? xAttrs_
                : xAttrsBuilder_.build();
            to_bitField0_ |= 0x00000020;
          }
          if (((from_bitField0_ & 0x00000040) != 0)) {
            result.typeQuotas_ = typeQuotasBuilder_ == null
                ? typeQuotas_
                : typeQuotasBuilder_.build();
            to_bitField0_ |= 0x00000040;
          }
          result.bitField0_ |= to_bitField0_;
        }

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance()) return this;
          if (other.hasModificationTime()) {
            setModificationTime(other.getModificationTime());
          }
          if (other.hasNsQuota()) {
            setNsQuota(other.getNsQuota());
          }
          if (other.hasDsQuota()) {
            setDsQuota(other.getDsQuota());
          }
          if (other.hasPermission()) {
            setPermission(other.getPermission());
          }
          if (other.hasAcl()) {
            mergeAcl(other.getAcl());
          }
          if (other.hasXAttrs()) {
            mergeXAttrs(other.getXAttrs());
          }
          if (other.hasTypeQuotas()) {
            mergeTypeQuotas(other.getTypeQuotas());
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }

        @java.lang.Override
        public final boolean isInitialized() {
          if (hasXAttrs()) {
            if (!getXAttrs().isInitialized()) {
              return false;
            }
          }
          if (hasTypeQuotas()) {
            if (!getTypeQuotas().isInitialized()) {
              return false;
            }
          }
          return true;
        }

        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 8: {
                  modificationTime_ = input.readUInt64();
                  bitField0_ |= 0x00000001;
                  break;
                } // case 8
                case 16: {
                  nsQuota_ = input.readUInt64();
                  bitField0_ |= 0x00000002;
                  break;
                } // case 16
                case 24: {
                  dsQuota_ = input.readUInt64();
                  bitField0_ |= 0x00000004;
                  break;
                } // case 24
                case 33: {
                  permission_ = input.readFixed64();
                  bitField0_ |= 0x00000008;
                  break;
                } // case 33
                case 42: {
                  input.readMessage(
                      getAclFieldBuilder().getBuilder(),
                      extensionRegistry);
                  bitField0_ |= 0x00000010;
                  break;
                } // case 42
                case 50: {
                  input.readMessage(
                      getXAttrsFieldBuilder().getBuilder(),
                      extensionRegistry);
                  bitField0_ |= 0x00000020;
                  break;
                } // case 50
                case 58: {
                  input.readMessage(
                      getTypeQuotasFieldBuilder().getBuilder(),
                      extensionRegistry);
                  bitField0_ |= 0x00000040;
                  break;
                } // case 58
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
        private int bitField0_;

        private long modificationTime_ ;
        /**
         * <code>optional uint64 modificationTime = 1;</code>
         * @return Whether the modificationTime field is set.
         */
        @java.lang.Override
        public boolean hasModificationTime() {
          return ((bitField0_ & 0x00000001) != 0);
        }
        /**
         * <code>optional uint64 modificationTime = 1;</code>
         * @return The modificationTime.
         */
        @java.lang.Override
        public long getModificationTime() {
          return modificationTime_;
        }
        /**
         * <code>optional uint64 modificationTime = 1;</code>
         * @param value The modificationTime to set.
         * @return This builder for chaining.
         */
        public Builder setModificationTime(long value) {

          modificationTime_ = value;
          bitField0_ |= 0x00000001;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 modificationTime = 1;</code>
         * @return This builder for chaining.
         */
        public Builder clearModificationTime() {
          bitField0_ = (bitField0_ & ~0x00000001);
          modificationTime_ = 0L;
          onChanged();
          return this;
        }

        private long nsQuota_ ;
        /**
         * <pre>
         * namespace quota
         * </pre>
         *
         * <code>optional uint64 nsQuota = 2;</code>
         * @return Whether the nsQuota field is set.
         */
        @java.lang.Override
        public boolean hasNsQuota() {
          return ((bitField0_ & 0x00000002) != 0);
        }
        /**
         * <pre>
         * namespace quota
         * </pre>
         *
         * <code>optional uint64 nsQuota = 2;</code>
         * @return The nsQuota.
         */
        @java.lang.Override
        public long getNsQuota() {
          return nsQuota_;
        }
        /**
         * <pre>
         * namespace quota
         * </pre>
         *
         * <code>optional uint64 nsQuota = 2;</code>
         * @param value The nsQuota to set.
         * @return This builder for chaining.
         */
        public Builder setNsQuota(long value) {

          nsQuota_ = value;
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        /**
         * <pre>
         * namespace quota
         * </pre>
         *
         * <code>optional uint64 nsQuota = 2;</code>
         * @return This builder for chaining.
         */
        public Builder clearNsQuota() {
          bitField0_ = (bitField0_ & ~0x00000002);
          nsQuota_ = 0L;
          onChanged();
          return this;
        }

        private long dsQuota_ ;
        /**
         * <pre>
         * diskspace quota
         * </pre>
         *
         * <code>optional uint64 dsQuota = 3;</code>
         * @return Whether the dsQuota field is set.
         */
        @java.lang.Override
        public boolean hasDsQuota() {
          return ((bitField0_ & 0x00000004) != 0);
        }
        /**
         * <pre>
         * diskspace quota
         * </pre>
         *
         * <code>optional uint64 dsQuota = 3;</code>
         * @return The dsQuota.
         */
        @java.lang.Override
        public long getDsQuota() {
          return dsQuota_;
        }
        /**
         * <pre>
         * diskspace quota
         * </pre>
         *
         * <code>optional uint64 dsQuota = 3;</code>
         * @param value The dsQuota to set.
         * @return This builder for chaining.
         */
        public Builder setDsQuota(long value) {

          dsQuota_ = value;
          bitField0_ |= 0x00000004;
          onChanged();
          return this;
        }
        /**
         * <pre>
         * diskspace quota
         * </pre>
         *
         * <code>optional uint64 dsQuota = 3;</code>
         * @return This builder for chaining.
         */
        public Builder clearDsQuota() {
          bitField0_ = (bitField0_ & ~0x00000004);
          dsQuota_ = 0L;
          onChanged();
          return this;
        }

        private long permission_ ;
        /**
         * <code>optional fixed64 permission = 4;</code>
         * @return Whether the permission field is set.
         */
        @java.lang.Override
        public boolean hasPermission() {
          return ((bitField0_ & 0x00000008) != 0);
        }
        /**
         * <code>optional fixed64 permission = 4;</code>
         * @return The permission.
         */
        @java.lang.Override
        public long getPermission() {
          return permission_;
        }
        /**
         * <code>optional fixed64 permission = 4;</code>
         * @param value The permission to set.
         * @return This builder for chaining.
         */
        public Builder setPermission(long value) {

          permission_ = value;
          bitField0_ |= 0x00000008;
          onChanged();
          return this;
        }
        /**
         * <code>optional fixed64 permission = 4;</code>
         * @return This builder for chaining.
         */
        public Builder clearPermission() {
          bitField0_ = (bitField0_ & ~0x00000008);
          permission_ = 0L;
          onChanged();
          return this;
        }

        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto acl_;
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder> aclBuilder_;
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
         * @return Whether the acl field is set.
         */
        public boolean hasAcl() {
          return ((bitField0_ & 0x00000010) != 0);
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
         * @return The acl.
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto getAcl() {
          if (aclBuilder_ == null) {
            return acl_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance() : acl_;
          } else {
            return aclBuilder_.getMessage();
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
         */
        public Builder setAcl(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto value) {
          if (aclBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            acl_ = value;
          } else {
            aclBuilder_.setMessage(value);
          }
          bitField0_ |= 0x00000010;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
         */
        public Builder setAcl(
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder builderForValue) {
          if (aclBuilder_ == null) {
            acl_ = builderForValue.build();
          } else {
            aclBuilder_.setMessage(builderForValue.build());
          }
          bitField0_ |= 0x00000010;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
         */
        public Builder mergeAcl(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto value) {
          if (aclBuilder_ == null) {
            if (((bitField0_ & 0x00000010) != 0) &&
              acl_ != null &&
              acl_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance()) {
              getAclBuilder().mergeFrom(value);
            } else {
              acl_ = value;
            }
          } else {
            aclBuilder_.mergeFrom(value);
          }
          if (acl_ != null) {
            bitField0_ |= 0x00000010;
            onChanged();
          }
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
         */
        public Builder clearAcl() {
          bitField0_ = (bitField0_ & ~0x00000010);
          acl_ = null;
          if (aclBuilder_ != null) {
            aclBuilder_.dispose();
            aclBuilder_ = null;
          }
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder getAclBuilder() {
          bitField0_ |= 0x00000010;
          onChanged();
          return getAclFieldBuilder().getBuilder();
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder getAclOrBuilder() {
          if (aclBuilder_ != null) {
            return aclBuilder_.getMessageOrBuilder();
          } else {
            return acl_ == null ?
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.getDefaultInstance() : acl_;
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 5;</code>
         */
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder> 
            getAclFieldBuilder() {
          if (aclBuilder_ == null) {
            aclBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProtoOrBuilder>(
                    getAcl(),
                    getParentForChildren(),
                    isClean());
            acl_ = null;
          }
          return aclBuilder_;
        }
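        // Usage sketch (not part of the generated code): getAclBuilder() marks
        // the acl field as present and hands back a nested builder whose state
        // is folded into the message when the directory is built, while
        // setAcl() replaces the nested message wholesale and mergeAcl() merges
        // field-by-field into any ACL already on the builder.
        //
        //   FsImageProto.INodeSection.INodeDirectory.Builder db =
        //       FsImageProto.INodeSection.INodeDirectory.newBuilder();
        //   db.getAclBuilder();   // acl field is now considered set
        //   assert db.build().hasAcl();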

        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto xAttrs_;
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder> xAttrsBuilder_;
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
         * @return Whether the xAttrs field is set.
         */
        public boolean hasXAttrs() {
          return ((bitField0_ & 0x00000020) != 0);
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
         * @return The xAttrs.
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto getXAttrs() {
          if (xAttrsBuilder_ == null) {
            return xAttrs_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance() : xAttrs_;
          } else {
            return xAttrsBuilder_.getMessage();
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
         */
        public Builder setXAttrs(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto value) {
          if (xAttrsBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            xAttrs_ = value;
          } else {
            xAttrsBuilder_.setMessage(value);
          }
          bitField0_ |= 0x00000020;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
         */
        public Builder setXAttrs(
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder builderForValue) {
          if (xAttrsBuilder_ == null) {
            xAttrs_ = builderForValue.build();
          } else {
            xAttrsBuilder_.setMessage(builderForValue.build());
          }
          bitField0_ |= 0x00000020;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
         */
        public Builder mergeXAttrs(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto value) {
          if (xAttrsBuilder_ == null) {
            if (((bitField0_ & 0x00000020) != 0) &&
              xAttrs_ != null &&
              xAttrs_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance()) {
              getXAttrsBuilder().mergeFrom(value);
            } else {
              xAttrs_ = value;
            }
          } else {
            xAttrsBuilder_.mergeFrom(value);
          }
          if (xAttrs_ != null) {
            bitField0_ |= 0x00000020;
            onChanged();
          }
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
         */
        public Builder clearXAttrs() {
          bitField0_ = (bitField0_ & ~0x00000020);
          xAttrs_ = null;
          if (xAttrsBuilder_ != null) {
            xAttrsBuilder_.dispose();
            xAttrsBuilder_ = null;
          }
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder getXAttrsBuilder() {
          bitField0_ |= 0x00000020;
          onChanged();
          return getXAttrsFieldBuilder().getBuilder();
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder getXAttrsOrBuilder() {
          if (xAttrsBuilder_ != null) {
            return xAttrsBuilder_.getMessageOrBuilder();
          } else {
            return xAttrs_ == null ?
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.getDefaultInstance() : xAttrs_;
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.XAttrFeatureProto xAttrs = 6;</code>
         */
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder> 
            getXAttrsFieldBuilder() {
          if (xAttrsBuilder_ == null) {
            xAttrsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProtoOrBuilder>(
                    getXAttrs(),
                    getParentForChildren(),
                    isClean());
            xAttrs_ = null;
          }
          return xAttrsBuilder_;
        }

        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas_;
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProtoOrBuilder> typeQuotasBuilder_;
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
         * @return Whether the typeQuotas field is set.
         */
        public boolean hasTypeQuotas() {
          return ((bitField0_ & 0x00000040) != 0);
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
         * @return The typeQuotas.
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto getTypeQuotas() {
          if (typeQuotasBuilder_ == null) {
            return typeQuotas_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.getDefaultInstance() : typeQuotas_;
          } else {
            return typeQuotasBuilder_.getMessage();
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
         */
        public Builder setTypeQuotas(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto value) {
          if (typeQuotasBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            typeQuotas_ = value;
          } else {
            typeQuotasBuilder_.setMessage(value);
          }
          bitField0_ |= 0x00000040;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
         */
        public Builder setTypeQuotas(
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder builderForValue) {
          if (typeQuotasBuilder_ == null) {
            typeQuotas_ = builderForValue.build();
          } else {
            typeQuotasBuilder_.setMessage(builderForValue.build());
          }
          bitField0_ |= 0x00000040;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
         */
        public Builder mergeTypeQuotas(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto value) {
          if (typeQuotasBuilder_ == null) {
            if (((bitField0_ & 0x00000040) != 0) &&
              typeQuotas_ != null &&
              typeQuotas_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.getDefaultInstance()) {
              getTypeQuotasBuilder().mergeFrom(value);
            } else {
              typeQuotas_ = value;
            }
          } else {
            typeQuotasBuilder_.mergeFrom(value);
          }
          if (typeQuotas_ != null) {
            bitField0_ |= 0x00000040;
            onChanged();
          }
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
         */
        public Builder clearTypeQuotas() {
          bitField0_ = (bitField0_ & ~0x00000040);
          typeQuotas_ = null;
          if (typeQuotasBuilder_ != null) {
            typeQuotasBuilder_.dispose();
            typeQuotasBuilder_ = null;
          }
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder getTypeQuotasBuilder() {
          bitField0_ |= 0x00000040;
          onChanged();
          return getTypeQuotasFieldBuilder().getBuilder();
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProtoOrBuilder getTypeQuotasOrBuilder() {
          if (typeQuotasBuilder_ != null) {
            return typeQuotasBuilder_.getMessageOrBuilder();
          } else {
            return typeQuotas_ == null ?
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.getDefaultInstance() : typeQuotas_;
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.QuotaByStorageTypeFeatureProto typeQuotas = 7;</code>
         */
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProtoOrBuilder> 
            getTypeQuotasFieldBuilder() {
          if (typeQuotasBuilder_ == null) {
            typeQuotasBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProtoOrBuilder>(
                    getTypeQuotas(),
                    getParentForChildren(),
                    isClean());
            typeQuotas_ = null;
          }
          return typeQuotasBuilder_;
        }
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.INodeDirectory)
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.INodeDirectory)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<INodeDirectory>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<INodeDirectory>() {
        @java.lang.Override
        public INodeDirectory parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<INodeDirectory> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<INodeDirectory> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }
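    // Illustrative usage (hand-written, not protoc output; all values hypothetical):
    // an INodeDirectory message is normally assembled through its builder, including
    // nested optional features such as the ACL.
    //
    //   INodeSection.INodeDirectory dir = INodeSection.INodeDirectory.newBuilder()
    //       .setModificationTime(mtimeMillis)
    //       .setAcl(INodeSection.AclFeatureProto.newBuilder().addEntries(aclEntry))
    //       .build();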

    public interface INodeSymlinkOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection.INodeSymlink)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <code>optional fixed64 permission = 1;</code>
       * @return Whether the permission field is set.
       */
      boolean hasPermission();
      /**
       * <code>optional fixed64 permission = 1;</code>
       * @return The permission.
       */
      long getPermission();

      /**
       * <code>optional bytes target = 2;</code>
       * @return Whether the target field is set.
       */
      boolean hasTarget();
      /**
       * <code>optional bytes target = 2;</code>
       * @return The target.
       */
      org.apache.hadoop.thirdparty.protobuf.ByteString getTarget();

      /**
       * <code>optional uint64 modificationTime = 3;</code>
       * @return Whether the modificationTime field is set.
       */
      boolean hasModificationTime();
      /**
       * <code>optional uint64 modificationTime = 3;</code>
       * @return The modificationTime.
       */
      long getModificationTime();

      /**
       * <code>optional uint64 accessTime = 4;</code>
       * @return Whether the accessTime field is set.
       */
      boolean hasAccessTime();
      /**
       * <code>optional uint64 accessTime = 4;</code>
       * @return The accessTime.
       */
      long getAccessTime();
    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INodeSymlink}
     */
    public static final class INodeSymlink extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection.INodeSymlink)
        INodeSymlinkOrBuilder {
    private static final long serialVersionUID = 0L;
      // Use INodeSymlink.newBuilder() to construct.
      private INodeSymlink(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private INodeSymlink() {
        target_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new INodeSymlink();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder.class);
      }

      private int bitField0_;
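      // Note (hand-written, not protoc output): each bit of bitField0_ records whether one
      // optional field was explicitly set: 0x01 = permission, 0x02 = target,
      // 0x04 = modificationTime, 0x08 = accessTime; the hasXxx() accessors below test
      // these bits.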
      public static final int PERMISSION_FIELD_NUMBER = 1;
      private long permission_ = 0L;
      /**
       * <code>optional fixed64 permission = 1;</code>
       * @return Whether the permission field is set.
       */
      @java.lang.Override
      public boolean hasPermission() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional fixed64 permission = 1;</code>
       * @return The permission.
       */
      @java.lang.Override
      public long getPermission() {
        return permission_;
      }

      public static final int TARGET_FIELD_NUMBER = 2;
      private org.apache.hadoop.thirdparty.protobuf.ByteString target_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>optional bytes target = 2;</code>
       * @return Whether the target field is set.
       */
      @java.lang.Override
      public boolean hasTarget() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional bytes target = 2;</code>
       * @return The target.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getTarget() {
        return target_;
      }

      public static final int MODIFICATIONTIME_FIELD_NUMBER = 3;
      private long modificationTime_ = 0L;
      /**
       * <code>optional uint64 modificationTime = 3;</code>
       * @return Whether the modificationTime field is set.
       */
      @java.lang.Override
      public boolean hasModificationTime() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional uint64 modificationTime = 3;</code>
       * @return The modificationTime.
       */
      @java.lang.Override
      public long getModificationTime() {
        return modificationTime_;
      }

      public static final int ACCESSTIME_FIELD_NUMBER = 4;
      private long accessTime_ = 0L;
      /**
       * <code>optional uint64 accessTime = 4;</code>
       * @return Whether the accessTime field is set.
       */
      @java.lang.Override
      public boolean hasAccessTime() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional uint64 accessTime = 4;</code>
       * @return The accessTime.
       */
      @java.lang.Override
      public long getAccessTime() {
        return accessTime_;
      }

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        memoizedIsInitialized = 1;
        return true;
      }

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        if (((bitField0_ & 0x00000001) != 0)) {
          output.writeFixed64(1, permission_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          output.writeBytes(2, target_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          output.writeUInt64(3, modificationTime_);
        }
        if (((bitField0_ & 0x00000008) != 0)) {
          output.writeUInt64(4, accessTime_);
        }
        getUnknownFields().writeTo(output);
      }

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeFixed64Size(1, permission_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeBytesSize(2, target_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64Size(3, modificationTime_);
        }
        if (((bitField0_ & 0x00000008) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64Size(4, accessTime_);
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
          return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink) obj;

        if (hasPermission() != other.hasPermission()) return false;
        if (hasPermission()) {
          if (getPermission()
              != other.getPermission()) return false;
        }
        if (hasTarget() != other.hasTarget()) return false;
        if (hasTarget()) {
          if (!getTarget()
              .equals(other.getTarget())) return false;
        }
        if (hasModificationTime() != other.hasModificationTime()) return false;
        if (hasModificationTime()) {
          if (getModificationTime()
              != other.getModificationTime()) return false;
        }
        if (hasAccessTime() != other.hasAccessTime()) return false;
        if (hasAccessTime()) {
          if (getAccessTime()
              != other.getAccessTime()) return false;
        }
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (hasPermission()) {
          hash = (37 * hash) + PERMISSION_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getPermission());
        }
        if (hasTarget()) {
          hash = (37 * hash) + TARGET_FIELD_NUMBER;
          hash = (53 * hash) + getTarget().hashCode();
        }
        if (hasModificationTime()) {
          hash = (37 * hash) + MODIFICATIONTIME_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getModificationTime());
        }
        if (hasAccessTime()) {
          hash = (37 * hash) + ACCESSTIME_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getAccessTime());
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INodeSymlink}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection.INodeSymlink)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlinkOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.newBuilder()
        private Builder() {

        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);

        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          permission_ = 0L;
          target_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
          modificationTime_ = 0L;
          accessTime_ = 0L;
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink(this);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink result) {
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) != 0)) {
            result.permission_ = permission_;
            to_bitField0_ |= 0x00000001;
          }
          if (((from_bitField0_ & 0x00000002) != 0)) {
            result.target_ = target_;
            to_bitField0_ |= 0x00000002;
          }
          if (((from_bitField0_ & 0x00000004) != 0)) {
            result.modificationTime_ = modificationTime_;
            to_bitField0_ |= 0x00000004;
          }
          if (((from_bitField0_ & 0x00000008) != 0)) {
            result.accessTime_ = accessTime_;
            to_bitField0_ |= 0x00000008;
          }
          result.bitField0_ |= to_bitField0_;
        }

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance()) return this;
          if (other.hasPermission()) {
            setPermission(other.getPermission());
          }
          if (other.hasTarget()) {
            setTarget(other.getTarget());
          }
          if (other.hasModificationTime()) {
            setModificationTime(other.getModificationTime());
          }
          if (other.hasAccessTime()) {
            setAccessTime(other.getAccessTime());
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }

        @java.lang.Override
        public final boolean isInitialized() {
          return true;
        }

        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
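              // Note (hand-written, not protoc output): each tag is
              // (field number << 3) | wire type, so 9 = field 1 (fixed64),
              // 18 = field 2 (length-delimited), 24 and 32 = fields 3 and 4 (varint).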
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 9: {
                  permission_ = input.readFixed64();
                  bitField0_ |= 0x00000001;
                  break;
                } // case 9
                case 18: {
                  target_ = input.readBytes();
                  bitField0_ |= 0x00000002;
                  break;
                } // case 18
                case 24: {
                  modificationTime_ = input.readUInt64();
                  bitField0_ |= 0x00000004;
                  break;
                } // case 24
                case 32: {
                  accessTime_ = input.readUInt64();
                  bitField0_ |= 0x00000008;
                  break;
                } // case 32
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
        private int bitField0_;

        private long permission_ ;
        /**
         * <code>optional fixed64 permission = 1;</code>
         * @return Whether the permission field is set.
         */
        @java.lang.Override
        public boolean hasPermission() {
          return ((bitField0_ & 0x00000001) != 0);
        }
        /**
         * <code>optional fixed64 permission = 1;</code>
         * @return The permission.
         */
        @java.lang.Override
        public long getPermission() {
          return permission_;
        }
        /**
         * <code>optional fixed64 permission = 1;</code>
         * @param value The permission to set.
         * @return This builder for chaining.
         */
        public Builder setPermission(long value) {

          permission_ = value;
          bitField0_ |= 0x00000001;
          onChanged();
          return this;
        }
        /**
         * <code>optional fixed64 permission = 1;</code>
         * @return This builder for chaining.
         */
        public Builder clearPermission() {
          bitField0_ = (bitField0_ & ~0x00000001);
          permission_ = 0L;
          onChanged();
          return this;
        }

        private org.apache.hadoop.thirdparty.protobuf.ByteString target_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        /**
         * <code>optional bytes target = 2;</code>
         * @return Whether the target field is set.
         */
        @java.lang.Override
        public boolean hasTarget() {
          return ((bitField0_ & 0x00000002) != 0);
        }
        /**
         * <code>optional bytes target = 2;</code>
         * @return The target.
         */
        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.ByteString getTarget() {
          return target_;
        }
        /**
         * <code>optional bytes target = 2;</code>
         * @param value The target to set.
         * @return This builder for chaining.
         */
        public Builder setTarget(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
          if (value == null) { throw new NullPointerException(); }
          target_ = value;
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        /**
         * <code>optional bytes target = 2;</code>
         * @return This builder for chaining.
         */
        public Builder clearTarget() {
          bitField0_ = (bitField0_ & ~0x00000002);
          target_ = getDefaultInstance().getTarget();
          onChanged();
          return this;
        }

        private long modificationTime_ ;
        /**
         * <code>optional uint64 modificationTime = 3;</code>
         * @return Whether the modificationTime field is set.
         */
        @java.lang.Override
        public boolean hasModificationTime() {
          return ((bitField0_ & 0x00000004) != 0);
        }
        /**
         * <code>optional uint64 modificationTime = 3;</code>
         * @return The modificationTime.
         */
        @java.lang.Override
        public long getModificationTime() {
          return modificationTime_;
        }
        /**
         * <code>optional uint64 modificationTime = 3;</code>
         * @param value The modificationTime to set.
         * @return This builder for chaining.
         */
        public Builder setModificationTime(long value) {

          modificationTime_ = value;
          bitField0_ |= 0x00000004;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 modificationTime = 3;</code>
         * @return This builder for chaining.
         */
        public Builder clearModificationTime() {
          bitField0_ = (bitField0_ & ~0x00000004);
          modificationTime_ = 0L;
          onChanged();
          return this;
        }

        private long accessTime_ ;
        /**
         * <code>optional uint64 accessTime = 4;</code>
         * @return Whether the accessTime field is set.
         */
        @java.lang.Override
        public boolean hasAccessTime() {
          return ((bitField0_ & 0x00000008) != 0);
        }
        /**
         * <code>optional uint64 accessTime = 4;</code>
         * @return The accessTime.
         */
        @java.lang.Override
        public long getAccessTime() {
          return accessTime_;
        }
        /**
         * <code>optional uint64 accessTime = 4;</code>
         * @param value The accessTime to set.
         * @return This builder for chaining.
         */
        public Builder setAccessTime(long value) {

          accessTime_ = value;
          bitField0_ |= 0x00000008;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 accessTime = 4;</code>
         * @return This builder for chaining.
         */
        public Builder clearAccessTime() {
          bitField0_ = (bitField0_ & ~0x00000008);
          accessTime_ = 0L;
          onChanged();
          return this;
        }
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.INodeSymlink)
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.INodeSymlink)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<INodeSymlink>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<INodeSymlink>() {
        @java.lang.Override
        public INodeSymlink parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<INodeSymlink> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<INodeSymlink> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }
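    // Illustrative round trip (hand-written, not protoc output; the target path and
    // timestamp are hypothetical values):
    //
    //   INodeSection.INodeSymlink link = INodeSection.INodeSymlink.newBuilder()
    //       .setTarget(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("/some/target"))
    //       .setModificationTime(0L)
    //       .build();
    //   INodeSection.INodeSymlink parsed = INodeSection.INodeSymlink.parseFrom(link.toByteArray());
    //   assert parsed.getTarget().equals(link.getTarget());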

    public interface INodeOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeSection.INode)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <code>required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1;</code>
       * @return Whether the type field is set.
       */
      boolean hasType();
      /**
       * <code>required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1;</code>
       * @return The type.
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type getType();

      /**
       * <code>required uint64 id = 2;</code>
       * @return Whether the id field is set.
       */
      boolean hasId();
      /**
       * <code>required uint64 id = 2;</code>
       * @return The id.
       */
      long getId();

      /**
       * <code>optional bytes name = 3;</code>
       * @return Whether the name field is set.
       */
      boolean hasName();
      /**
       * <code>optional bytes name = 3;</code>
       * @return The name.
       */
      org.apache.hadoop.thirdparty.protobuf.ByteString getName();

      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
       * @return Whether the file field is set.
       */
      boolean hasFile();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
       * @return The file.
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getFile();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder getFileOrBuilder();

      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
       * @return Whether the directory field is set.
       */
      boolean hasDirectory();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
       * @return The directory.
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getDirectory();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder getDirectoryOrBuilder();

      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
       * @return Whether the symlink field is set.
       */
      boolean hasSymlink();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
       * @return The symlink.
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink getSymlink();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlinkOrBuilder getSymlinkOrBuilder();
    }
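    // Note (hand-written, not protoc output): INode behaves as a tagged union. The required
    // type field says which of the optional file/directory/symlink sub-messages is
    // meaningful, so consumers typically switch on it (handler names are hypothetical):
    //
    //   switch (inode.getType()) {
    //     case FILE:      handleFile(inode.getFile()); break;
    //     case DIRECTORY: handleDirectory(inode.getDirectory()); break;
    //     case SYMLINK:   handleSymlink(inode.getSymlink()); break;
    //   }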
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INode}
     */
    public static final class INode extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeSection.INode)
        INodeOrBuilder {
    private static final long serialVersionUID = 0L;
      // Use INode.newBuilder() to construct.
      private INode(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private INode() {
        type_ = 1;
        name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new INode();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INode_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INode_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder.class);
      }

      /**
       * Protobuf enum {@code hadoop.hdfs.fsimage.INodeSection.INode.Type}
       */
      public enum Type
          implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
        /**
         * <code>FILE = 1;</code>
         */
        FILE(1),
        /**
         * <code>DIRECTORY = 2;</code>
         */
        DIRECTORY(2),
        /**
         * <code>SYMLINK = 3;</code>
         */
        SYMLINK(3),
        ;

        /**
         * <code>FILE = 1;</code>
         */
        public static final int FILE_VALUE = 1;
        /**
         * <code>DIRECTORY = 2;</code>
         */
        public static final int DIRECTORY_VALUE = 2;
        /**
         * <code>SYMLINK = 3;</code>
         */
        public static final int SYMLINK_VALUE = 3;


        public final int getNumber() {
          return value;
        }

        /**
         * @param value The numeric wire value of the corresponding enum entry.
         * @return The enum associated with the given numeric wire value.
         * @deprecated Use {@link #forNumber(int)} instead.
         */
        @java.lang.Deprecated
        public static Type valueOf(int value) {
          return forNumber(value);
        }

        /**
         * @param value The numeric wire value of the corresponding enum entry.
         * @return The enum associated with the given numeric wire value.
         */
        public static Type forNumber(int value) {
          switch (value) {
            case 1: return FILE;
            case 2: return DIRECTORY;
            case 3: return SYMLINK;
            default: return null;
          }
        }

        public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<Type>
            internalGetValueMap() {
          return internalValueMap;
        }
        private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
            Type> internalValueMap =
              new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<Type>() {
                public Type findValueByNumber(int number) {
                  return Type.forNumber(number);
                }
              };

        public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
            getValueDescriptor() {
          return getDescriptor().getValues().get(ordinal());
        }
        public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
            getDescriptorForType() {
          return getDescriptor();
        }
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDescriptor().getEnumTypes().get(0);
        }

        private static final Type[] VALUES = values();

        public static Type valueOf(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
          if (desc.getType() != getDescriptor()) {
            throw new java.lang.IllegalArgumentException(
              "EnumValueDescriptor is not for this type.");
          }
          return VALUES[desc.getIndex()];
        }

        private final int value;

        private Type(int value) {
          this.value = value;
        }

        // @@protoc_insertion_point(enum_scope:hadoop.hdfs.fsimage.INodeSection.INode.Type)
      }
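      // Note (hand-written, not protoc output): forNumber() returns null for wire values
      // outside 1..3, which is why getType() below falls back to FILE when the stored value
      // is unrecognized; e.g. Type.forNumber(2) == Type.DIRECTORY, Type.forNumber(99) == null.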

      private int bitField0_;
      public static final int TYPE_FIELD_NUMBER = 1;
      private int type_ = 1;
      /**
       * <code>required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1;</code>
       * @return Whether the type field is set.
       */
      @java.lang.Override public boolean hasType() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1;</code>
       * @return The type.
       */
      @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type getType() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type result = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type.forNumber(type_);
        return result == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type.FILE : result;
      }

      public static final int ID_FIELD_NUMBER = 2;
      private long id_ = 0L;
      /**
       * <code>required uint64 id = 2;</code>
       * @return Whether the id field is set.
       */
      @java.lang.Override
      public boolean hasId() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required uint64 id = 2;</code>
       * @return The id.
       */
      @java.lang.Override
      public long getId() {
        return id_;
      }

      public static final int NAME_FIELD_NUMBER = 3;
      private org.apache.hadoop.thirdparty.protobuf.ByteString name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>optional bytes name = 3;</code>
       * @return Whether the name field is set.
       */
      @java.lang.Override
      public boolean hasName() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional bytes name = 3;</code>
       * @return The name.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getName() {
        return name_;
      }

      public static final int FILE_FIELD_NUMBER = 4;
      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile file_;
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
       * @return Whether the file field is set.
       */
      @java.lang.Override
      public boolean hasFile() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
       * @return The file.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getFile() {
        return file_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance() : file_;
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder getFileOrBuilder() {
        return file_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance() : file_;
      }

      public static final int DIRECTORY_FIELD_NUMBER = 5;
      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory directory_;
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
       * @return Whether the directory field is set.
       */
      @java.lang.Override
      public boolean hasDirectory() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
       * @return The directory.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getDirectory() {
        return directory_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance() : directory_;
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder getDirectoryOrBuilder() {
        return directory_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance() : directory_;
      }

      public static final int SYMLINK_FIELD_NUMBER = 6;
      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink symlink_;
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
       * @return Whether the symlink field is set.
       */
      @java.lang.Override
      public boolean hasSymlink() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
       * @return The symlink.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink getSymlink() {
        return symlink_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance() : symlink_;
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlinkOrBuilder getSymlinkOrBuilder() {
        return symlink_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance() : symlink_;
      }

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        if (!hasType()) {
          memoizedIsInitialized = 0;
          return false;
        }
        if (!hasId()) {
          memoizedIsInitialized = 0;
          return false;
        }
        if (hasFile()) {
          if (!getFile().isInitialized()) {
            memoizedIsInitialized = 0;
            return false;
          }
        }
        if (hasDirectory()) {
          if (!getDirectory().isInitialized()) {
            memoizedIsInitialized = 0;
            return false;
          }
        }
        memoizedIsInitialized = 1;
        return true;
      }

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        if (((bitField0_ & 0x00000001) != 0)) {
          output.writeEnum(1, type_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          output.writeUInt64(2, id_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          output.writeBytes(3, name_);
        }
        if (((bitField0_ & 0x00000008) != 0)) {
          output.writeMessage(4, getFile());
        }
        if (((bitField0_ & 0x00000010) != 0)) {
          output.writeMessage(5, getDirectory());
        }
        if (((bitField0_ & 0x00000020) != 0)) {
          output.writeMessage(6, getSymlink());
        }
        getUnknownFields().writeTo(output);
      }

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeEnumSize(1, type_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64Size(2, id_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeBytesSize(3, name_);
        }
        if (((bitField0_ & 0x00000008) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeMessageSize(4, getFile());
        }
        if (((bitField0_ & 0x00000010) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeMessageSize(5, getDirectory());
        }
        if (((bitField0_ & 0x00000020) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeMessageSize(6, getSymlink());
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
          return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode) obj;

        if (hasType() != other.hasType()) return false;
        if (hasType()) {
          if (type_ != other.type_) return false;
        }
        if (hasId() != other.hasId()) return false;
        if (hasId()) {
          if (getId()
              != other.getId()) return false;
        }
        if (hasName() != other.hasName()) return false;
        if (hasName()) {
          if (!getName()
              .equals(other.getName())) return false;
        }
        if (hasFile() != other.hasFile()) return false;
        if (hasFile()) {
          if (!getFile()
              .equals(other.getFile())) return false;
        }
        if (hasDirectory() != other.hasDirectory()) return false;
        if (hasDirectory()) {
          if (!getDirectory()
              .equals(other.getDirectory())) return false;
        }
        if (hasSymlink() != other.hasSymlink()) return false;
        if (hasSymlink()) {
          if (!getSymlink()
              .equals(other.getSymlink())) return false;
        }
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (hasType()) {
          hash = (37 * hash) + TYPE_FIELD_NUMBER;
          hash = (53 * hash) + type_;
        }
        if (hasId()) {
          hash = (37 * hash) + ID_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getId());
        }
        if (hasName()) {
          hash = (37 * hash) + NAME_FIELD_NUMBER;
          hash = (53 * hash) + getName().hashCode();
        }
        if (hasFile()) {
          hash = (37 * hash) + FILE_FIELD_NUMBER;
          hash = (53 * hash) + getFile().hashCode();
        }
        if (hasDirectory()) {
          hash = (37 * hash) + DIRECTORY_FIELD_NUMBER;
          hash = (53 * hash) + getDirectory().hashCode();
        }
        if (hasSymlink()) {
          hash = (37 * hash) + SYMLINK_FIELD_NUMBER;
          hash = (53 * hash) + getSymlink().hashCode();
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
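      /*
       * Illustrative sketch (not generated code): the parseDelimitedFrom
       * overloads above read one length-prefixed INode record per call and
       * return null at a clean end of stream, so a hypothetical stream "in"
       * holding delimited records could be drained with:
       *
       *   INodeSection.INode node;
       *   while ((node = INodeSection.INode.parseDelimitedFrom(in)) != null) {
       *     // process node
       *   }
       */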
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection.INode}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection.INode)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INode_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INode_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.newBuilder()
        private Builder() {
          maybeForceBuilderInitialization();
        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);
          maybeForceBuilderInitialization();
        }
        private void maybeForceBuilderInitialization() {
          if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                  .alwaysUseFieldBuilders) {
            getFileFieldBuilder();
            getDirectoryFieldBuilder();
            getSymlinkFieldBuilder();
          }
        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          type_ = 1;
          id_ = 0L;
          name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
          file_ = null;
          if (fileBuilder_ != null) {
            fileBuilder_.dispose();
            fileBuilder_ = null;
          }
          directory_ = null;
          if (directoryBuilder_ != null) {
            directoryBuilder_.dispose();
            directoryBuilder_ = null;
          }
          symlink_ = null;
          if (symlinkBuilder_ != null) {
            symlinkBuilder_.dispose();
            symlinkBuilder_ = null;
          }
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_INode_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode(this);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode result) {
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) != 0)) {
            result.type_ = type_;
            to_bitField0_ |= 0x00000001;
          }
          if (((from_bitField0_ & 0x00000002) != 0)) {
            result.id_ = id_;
            to_bitField0_ |= 0x00000002;
          }
          if (((from_bitField0_ & 0x00000004) != 0)) {
            result.name_ = name_;
            to_bitField0_ |= 0x00000004;
          }
          if (((from_bitField0_ & 0x00000008) != 0)) {
            result.file_ = fileBuilder_ == null
                ? file_
                : fileBuilder_.build();
            to_bitField0_ |= 0x00000008;
          }
          if (((from_bitField0_ & 0x00000010) != 0)) {
            result.directory_ = directoryBuilder_ == null
                ? directory_
                : directoryBuilder_.build();
            to_bitField0_ |= 0x00000010;
          }
          if (((from_bitField0_ & 0x00000020) != 0)) {
            result.symlink_ = symlinkBuilder_ == null
                ? symlink_
                : symlinkBuilder_.build();
            to_bitField0_ |= 0x00000020;
          }
          result.bitField0_ |= to_bitField0_;
        }

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance()) return this;
          if (other.hasType()) {
            setType(other.getType());
          }
          if (other.hasId()) {
            setId(other.getId());
          }
          if (other.hasName()) {
            setName(other.getName());
          }
          if (other.hasFile()) {
            mergeFile(other.getFile());
          }
          if (other.hasDirectory()) {
            mergeDirectory(other.getDirectory());
          }
          if (other.hasSymlink()) {
            mergeSymlink(other.getSymlink());
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }

        @java.lang.Override
        public final boolean isInitialized() {
          if (!hasType()) {
            return false;
          }
          if (!hasId()) {
            return false;
          }
          if (hasFile()) {
            if (!getFile().isInitialized()) {
              return false;
            }
          }
          if (hasDirectory()) {
            if (!getDirectory().isInitialized()) {
              return false;
            }
          }
          return true;
        }

        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 8: {
                  int tmpRaw = input.readEnum();
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type tmpValue =
                      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type.forNumber(tmpRaw);
                  if (tmpValue == null) {
                    mergeUnknownVarintField(1, tmpRaw);
                  } else {
                    type_ = tmpRaw;
                    bitField0_ |= 0x00000001;
                  }
                  break;
                } // case 8
                case 16: {
                  id_ = input.readUInt64();
                  bitField0_ |= 0x00000002;
                  break;
                } // case 16
                case 26: {
                  name_ = input.readBytes();
                  bitField0_ |= 0x00000004;
                  break;
                } // case 26
                case 34: {
                  input.readMessage(
                      getFileFieldBuilder().getBuilder(),
                      extensionRegistry);
                  bitField0_ |= 0x00000008;
                  break;
                } // case 34
                case 42: {
                  input.readMessage(
                      getDirectoryFieldBuilder().getBuilder(),
                      extensionRegistry);
                  bitField0_ |= 0x00000010;
                  break;
                } // case 42
                case 50: {
                  input.readMessage(
                      getSymlinkFieldBuilder().getBuilder(),
                      extensionRegistry);
                  bitField0_ |= 0x00000020;
                  break;
                } // case 50
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
        private int bitField0_;

        private int type_ = 1;
        /**
         * <code>required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1;</code>
         * @return Whether the type field is set.
         */
        @java.lang.Override public boolean hasType() {
          return ((bitField0_ & 0x00000001) != 0);
        }
        /**
         * <code>required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1;</code>
         * @return The type.
         */
        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type getType() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type result = org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type.forNumber(type_);
          return result == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type.FILE : result;
        }
        /**
         * <code>required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1;</code>
         * @param value The type to set.
         * @return This builder for chaining.
         */
        public Builder setType(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Type value) {
          if (value == null) {
            throw new NullPointerException();
          }
          bitField0_ |= 0x00000001;
          type_ = value.getNumber();
          onChanged();
          return this;
        }
        /**
         * <code>required .hadoop.hdfs.fsimage.INodeSection.INode.Type type = 1;</code>
         * @return This builder for chaining.
         */
        public Builder clearType() {
          bitField0_ = (bitField0_ & ~0x00000001);
          type_ = 1;
          onChanged();
          return this;
        }

        private long id_ ;
        /**
         * <code>required uint64 id = 2;</code>
         * @return Whether the id field is set.
         */
        @java.lang.Override
        public boolean hasId() {
          return ((bitField0_ & 0x00000002) != 0);
        }
        /**
         * <code>required uint64 id = 2;</code>
         * @return The id.
         */
        @java.lang.Override
        public long getId() {
          return id_;
        }
        /**
         * <code>required uint64 id = 2;</code>
         * @param value The id to set.
         * @return This builder for chaining.
         */
        public Builder setId(long value) {

          id_ = value;
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        /**
         * <code>required uint64 id = 2;</code>
         * @return This builder for chaining.
         */
        public Builder clearId() {
          bitField0_ = (bitField0_ & ~0x00000002);
          id_ = 0L;
          onChanged();
          return this;
        }

        private org.apache.hadoop.thirdparty.protobuf.ByteString name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        /**
         * <code>optional bytes name = 3;</code>
         * @return Whether the name field is set.
         */
        @java.lang.Override
        public boolean hasName() {
          return ((bitField0_ & 0x00000004) != 0);
        }
        /**
         * <code>optional bytes name = 3;</code>
         * @return The name.
         */
        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.ByteString getName() {
          return name_;
        }
        /**
         * <code>optional bytes name = 3;</code>
         * @param value The name to set.
         * @return This builder for chaining.
         */
        public Builder setName(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
          if (value == null) { throw new NullPointerException(); }
          name_ = value;
          bitField0_ |= 0x00000004;
          onChanged();
          return this;
        }
        /**
         * <code>optional bytes name = 3;</code>
         * @return This builder for chaining.
         */
        public Builder clearName() {
          bitField0_ = (bitField0_ & ~0x00000004);
          name_ = getDefaultInstance().getName();
          onChanged();
          return this;
        }

        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile file_;
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder> fileBuilder_;
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
         * @return Whether the file field is set.
         */
        public boolean hasFile() {
          return ((bitField0_ & 0x00000008) != 0);
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
         * @return The file.
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getFile() {
          if (fileBuilder_ == null) {
            return file_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance() : file_;
          } else {
            return fileBuilder_.getMessage();
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
         */
        public Builder setFile(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile value) {
          if (fileBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            file_ = value;
          } else {
            fileBuilder_.setMessage(value);
          }
          bitField0_ |= 0x00000008;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
         */
        public Builder setFile(
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder builderForValue) {
          if (fileBuilder_ == null) {
            file_ = builderForValue.build();
          } else {
            fileBuilder_.setMessage(builderForValue.build());
          }
          bitField0_ |= 0x00000008;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
         */
        public Builder mergeFile(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile value) {
          if (fileBuilder_ == null) {
            if (((bitField0_ & 0x00000008) != 0) &&
              file_ != null &&
              file_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance()) {
              getFileBuilder().mergeFrom(value);
            } else {
              file_ = value;
            }
          } else {
            fileBuilder_.mergeFrom(value);
          }
          if (file_ != null) {
            bitField0_ |= 0x00000008;
            onChanged();
          }
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
         */
        public Builder clearFile() {
          bitField0_ = (bitField0_ & ~0x00000008);
          file_ = null;
          if (fileBuilder_ != null) {
            fileBuilder_.dispose();
            fileBuilder_ = null;
          }
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder getFileBuilder() {
          bitField0_ |= 0x00000008;
          onChanged();
          return getFileFieldBuilder().getBuilder();
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder getFileOrBuilder() {
          if (fileBuilder_ != null) {
            return fileBuilder_.getMessageOrBuilder();
          } else {
            return file_ == null ?
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance() : file_;
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile file = 4;</code>
         */
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder> 
            getFileFieldBuilder() {
          if (fileBuilder_ == null) {
            fileBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder>(
                    getFile(),
                    getParentForChildren(),
                    isClean());
            file_ = null;
          }
          return fileBuilder_;
        }

        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory directory_;
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder> directoryBuilder_;
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
         * @return Whether the directory field is set.
         */
        public boolean hasDirectory() {
          return ((bitField0_ & 0x00000010) != 0);
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
         * @return The directory.
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getDirectory() {
          if (directoryBuilder_ == null) {
            return directory_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance() : directory_;
          } else {
            return directoryBuilder_.getMessage();
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
         */
        public Builder setDirectory(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory value) {
          if (directoryBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            directory_ = value;
          } else {
            directoryBuilder_.setMessage(value);
          }
          bitField0_ |= 0x00000010;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
         */
        public Builder setDirectory(
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder builderForValue) {
          if (directoryBuilder_ == null) {
            directory_ = builderForValue.build();
          } else {
            directoryBuilder_.setMessage(builderForValue.build());
          }
          bitField0_ |= 0x00000010;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
         */
        public Builder mergeDirectory(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory value) {
          if (directoryBuilder_ == null) {
            if (((bitField0_ & 0x00000010) != 0) &&
              directory_ != null &&
              directory_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance()) {
              getDirectoryBuilder().mergeFrom(value);
            } else {
              directory_ = value;
            }
          } else {
            directoryBuilder_.mergeFrom(value);
          }
          if (directory_ != null) {
            bitField0_ |= 0x00000010;
            onChanged();
          }
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
         */
        public Builder clearDirectory() {
          bitField0_ = (bitField0_ & ~0x00000010);
          directory_ = null;
          if (directoryBuilder_ != null) {
            directoryBuilder_.dispose();
            directoryBuilder_ = null;
          }
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder getDirectoryBuilder() {
          bitField0_ |= 0x00000010;
          onChanged();
          return getDirectoryFieldBuilder().getBuilder();
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder getDirectoryOrBuilder() {
          if (directoryBuilder_ != null) {
            return directoryBuilder_.getMessageOrBuilder();
          } else {
            return directory_ == null ?
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance() : directory_;
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory directory = 5;</code>
         */
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder> 
            getDirectoryFieldBuilder() {
          if (directoryBuilder_ == null) {
            directoryBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder>(
                    getDirectory(),
                    getParentForChildren(),
                    isClean());
            directory_ = null;
          }
          return directoryBuilder_;
        }

        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink symlink_;
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlinkOrBuilder> symlinkBuilder_;
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
         * @return Whether the symlink field is set.
         */
        public boolean hasSymlink() {
          return ((bitField0_ & 0x00000020) != 0);
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
         * @return The symlink.
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink getSymlink() {
          if (symlinkBuilder_ == null) {
            return symlink_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance() : symlink_;
          } else {
            return symlinkBuilder_.getMessage();
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
         */
        public Builder setSymlink(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink value) {
          if (symlinkBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            symlink_ = value;
          } else {
            symlinkBuilder_.setMessage(value);
          }
          bitField0_ |= 0x00000020;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
         */
        public Builder setSymlink(
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder builderForValue) {
          if (symlinkBuilder_ == null) {
            symlink_ = builderForValue.build();
          } else {
            symlinkBuilder_.setMessage(builderForValue.build());
          }
          bitField0_ |= 0x00000020;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
         */
        public Builder mergeSymlink(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink value) {
          if (symlinkBuilder_ == null) {
            if (((bitField0_ & 0x00000020) != 0) &&
              symlink_ != null &&
              symlink_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance()) {
              getSymlinkBuilder().mergeFrom(value);
            } else {
              symlink_ = value;
            }
          } else {
            symlinkBuilder_.mergeFrom(value);
          }
          if (symlink_ != null) {
            bitField0_ |= 0x00000020;
            onChanged();
          }
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
         */
        public Builder clearSymlink() {
          bitField0_ = (bitField0_ & ~0x00000020);
          symlink_ = null;
          if (symlinkBuilder_ != null) {
            symlinkBuilder_.dispose();
            symlinkBuilder_ = null;
          }
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder getSymlinkBuilder() {
          bitField0_ |= 0x00000020;
          onChanged();
          return getSymlinkFieldBuilder().getBuilder();
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlinkOrBuilder getSymlinkOrBuilder() {
          if (symlinkBuilder_ != null) {
            return symlinkBuilder_.getMessageOrBuilder();
          } else {
            return symlink_ == null ?
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.getDefaultInstance() : symlink_;
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeSymlink symlink = 6;</code>
         */
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlinkOrBuilder> 
            getSymlinkFieldBuilder() {
          if (symlinkBuilder_ == null) {
            symlinkBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlinkOrBuilder>(
                    getSymlink(),
                    getParentForChildren(),
                    isClean());
            symlink_ = null;
          }
          return symlinkBuilder_;
        }
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection.INode)
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection.INode)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<INode>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<INode>() {
        @java.lang.Override
        public INode parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<INode> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<INode> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }
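    /*
     * Builder usage sketch for the INode message above (illustrative only;
     * the id and name values are hypothetical):
     *
     *   INodeSection.INode inode = INodeSection.INode.newBuilder()
     *       .setType(INodeSection.INode.Type.FILE)
     *       .setId(16386L)
     *       .setName(org.apache.hadoop.thirdparty.protobuf.ByteString
     *           .copyFromUtf8("example.txt"))
     *       .build();
     *
     * build() throws if the required type or id fields are unset (see
     * isInitialized()); buildPartial() skips that check.
     */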

    private int bitField0_;
    public static final int LASTINODEID_FIELD_NUMBER = 1;
    private long lastInodeId_ = 0L;
    /**
     * <code>optional uint64 lastInodeId = 1;</code>
     * @return Whether the lastInodeId field is set.
     */
    @java.lang.Override
    public boolean hasLastInodeId() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional uint64 lastInodeId = 1;</code>
     * @return The lastInodeId.
     */
    @java.lang.Override
    public long getLastInodeId() {
      return lastInodeId_;
    }

    public static final int NUMINODES_FIELD_NUMBER = 2;
    private long numInodes_ = 0L;
    /**
     * <pre>
     * The section's INode entries follow this header; numInodes gives their count.
     * </pre>
     *
     * <code>optional uint64 numInodes = 2;</code>
     * @return Whether the numInodes field is set.
     */
    @java.lang.Override
    public boolean hasNumInodes() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <pre>
     * The section's INode entries follow this header; numInodes gives their count.
     * </pre>
     *
     * <code>optional uint64 numInodes = 2;</code>
     * @return The numInodes.
     */
    @java.lang.Override
    public long getNumInodes() {
      return numInodes_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt64(1, lastInodeId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(2, numInodes_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(1, lastInodeId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(2, numInodes_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection) obj;

      if (hasLastInodeId() != other.hasLastInodeId()) return false;
      if (hasLastInodeId()) {
        if (getLastInodeId()
            != other.getLastInodeId()) return false;
      }
      if (hasNumInodes() != other.hasNumInodes()) return false;
      if (hasNumInodes()) {
        if (getNumInodes()
            != other.getNumInodes()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasLastInodeId()) {
        hash = (37 * hash) + LASTINODEID_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getLastInodeId());
      }
      if (hasNumInodes()) {
        hash = (37 * hash) + NUMINODES_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getNumInodes());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
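    /*
     * Illustrative sketch only (not generated code): one plausible way to read a
     * single length-prefixed INodeSection header with the helpers above. "in" is a
     * hypothetical stream positioned at the start of the (already decompressed)
     * INODE section payload; whether the real fsimage loader reads it exactly this
     * way is not asserted here.
     *
     *   java.io.InputStream in = ...;  // hypothetical section stream
     *   FsImageProto.INodeSection header =
     *       FsImageProto.INodeSection.parseDelimitedFrom(in);  // reads one delimited message
     *   long count = header.getNumInodes();  // number of inode records the section declares
     */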

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
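    /*
     * Worked example (illustrative only) of the permission layout described in the
     * javadoc below: one 64-bit long holding, from the most significant bits, a
     * 24-bit user string id, a 24-bit group string id, and 16 permission bits.
     * All variable names here are hypothetical.
     *
     *   long perm = ...;                                     // packed permission word
     *   int userStrId  = (int) (perm >>> 40);                // bits [0:24) of the layout
     *   int groupStrId = (int) ((perm >>> 16) & 0xFFFFFF);   // bits [24:48)
     *   short mode     = (short) (perm & 0xFFFF);            // bits [48:64), the permission bits
     */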
    /**
     * <pre>
     **
     * Permission is serialized as a 64-bit long. [0:24):[24:48):[48:64) (in Big Endian).
     * The first and the second parts are the string ids of the user and
     * group name, and the last 16 bits are the permission bits.
     *
     * Name: INODE
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.fsimage.INodeSection}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeSection)
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSectionOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        lastInodeId_ = 0L;
        numInodes_ = 0L;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection build() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection buildPartial() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.lastInodeId_ = lastInodeId_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.numInodes_ = numInodes_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection) {
          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection other) {
        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.getDefaultInstance()) return this;
        if (other.hasLastInodeId()) {
          setLastInodeId(other.getLastInodeId());
        }
        if (other.hasNumInodes()) {
          setNumInodes(other.getNumInodes());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                lastInodeId_ = input.readUInt64();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 16: {
                numInodes_ = input.readUInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private long lastInodeId_ ;
      /**
       * <code>optional uint64 lastInodeId = 1;</code>
       * @return Whether the lastInodeId field is set.
       */
      @java.lang.Override
      public boolean hasLastInodeId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional uint64 lastInodeId = 1;</code>
       * @return The lastInodeId.
       */
      @java.lang.Override
      public long getLastInodeId() {
        return lastInodeId_;
      }
      /**
       * <code>optional uint64 lastInodeId = 1;</code>
       * @param value The lastInodeId to set.
       * @return This builder for chaining.
       */
      public Builder setLastInodeId(long value) {

        lastInodeId_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 lastInodeId = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearLastInodeId() {
        bitField0_ = (bitField0_ & ~0x00000001);
        lastInodeId_ = 0L;
        onChanged();
        return this;
      }

      private long numInodes_ ;
      /**
       * <pre>
       * repeated INodes..
       * </pre>
       *
       * <code>optional uint64 numInodes = 2;</code>
       * @return Whether the numInodes field is set.
       */
      @java.lang.Override
      public boolean hasNumInodes() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       * repeated INodes..
       * </pre>
       *
       * <code>optional uint64 numInodes = 2;</code>
       * @return The numInodes.
       */
      @java.lang.Override
      public long getNumInodes() {
        return numInodes_;
      }
      /**
       * <pre>
       * repeated INodes..
       * </pre>
       *
       * <code>optional uint64 numInodes = 2;</code>
       * @param value The numInodes to set.
       * @return This builder for chaining.
       */
      public Builder setNumInodes(long value) {

        numInodes_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * repeated INodes..
       * </pre>
       *
       * <code>optional uint64 numInodes = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearNumInodes() {
        bitField0_ = (bitField0_ & ~0x00000002);
        numInodes_ = 0L;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeSection)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeSection)
    private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection();
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<INodeSection>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<INodeSection>() {
      @java.lang.Override
      public INodeSection parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<INodeSection> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<INodeSection> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
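  /*
   * Illustrative sketch only: building the INodeSection header message through the
   * generated builder above. The numeric values are hypothetical.
   *
   *   FsImageProto.INodeSection section = FsImageProto.INodeSection.newBuilder()
   *       .setLastInodeId(16400L)  // hypothetical id of the last allocated inode
   *       .setNumInodes(12L)       // hypothetical number of inode records in the section
   *       .build();
   */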

  public interface FilesUnderConstructionSectionOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.FilesUnderConstructionSection)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  }
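  /*
   * Illustrative sketch only, referring to the FilesUnderConstructionSection javadoc
   * below: each FileUnderConstructionEntry pairs the inode id of an open file with
   * its full path; per that javadoc, this section exists so the lease map can be
   * reconstructed on load. The literal values here are hypothetical.
   *
   *   FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry entry =
   *       FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.newBuilder()
   *           .setInodeId(16387L)                  // hypothetical inode id of an open file
   *           .setFullPath("/user/alice/app.log")  // hypothetical path still being written
   *           .build();
   */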
  /**
   * <pre>
   **
   * This section records information about under-construction files for
   * reconstructing the lease map.
   * NAME: FILES_UNDERCONSTRUCTION
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.fsimage.FilesUnderConstructionSection}
   */
  public static final class FilesUnderConstructionSection extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.FilesUnderConstructionSection)
      FilesUnderConstructionSectionOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use FilesUnderConstructionSection.newBuilder() to construct.
    private FilesUnderConstructionSection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private FilesUnderConstructionSection() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new FilesUnderConstructionSection();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.Builder.class);
    }

    public interface FileUnderConstructionEntryOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.FilesUnderConstructionSection.FileUnderConstructionEntry)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <code>optional uint64 inodeId = 1;</code>
       * @return Whether the inodeId field is set.
       */
      boolean hasInodeId();
      /**
       * <code>optional uint64 inodeId = 1;</code>
       * @return The inodeId.
       */
      long getInodeId();

      /**
       * <code>optional string fullPath = 2;</code>
       * @return Whether the fullPath field is set.
       */
      boolean hasFullPath();
      /**
       * <code>optional string fullPath = 2;</code>
       * @return The fullPath.
       */
      java.lang.String getFullPath();
      /**
       * <code>optional string fullPath = 2;</code>
       * @return The bytes for fullPath.
       */
      org.apache.hadoop.thirdparty.protobuf.ByteString
          getFullPathBytes();
    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.FilesUnderConstructionSection.FileUnderConstructionEntry}
     */
    public static final class FileUnderConstructionEntry extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.FilesUnderConstructionSection.FileUnderConstructionEntry)
        FileUnderConstructionEntryOrBuilder {
      private static final long serialVersionUID = 0L;
      // Use FileUnderConstructionEntry.newBuilder() to construct.
      private FileUnderConstructionEntry(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private FileUnderConstructionEntry() {
        fullPath_ = "";
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new FileUnderConstructionEntry();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.Builder.class);
      }

      private int bitField0_;
      public static final int INODEID_FIELD_NUMBER = 1;
      private long inodeId_ = 0L;
      /**
       * <code>optional uint64 inodeId = 1;</code>
       * @return Whether the inodeId field is set.
       */
      @java.lang.Override
      public boolean hasInodeId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional uint64 inodeId = 1;</code>
       * @return The inodeId.
       */
      @java.lang.Override
      public long getInodeId() {
        return inodeId_;
      }

      public static final int FULLPATH_FIELD_NUMBER = 2;
      @SuppressWarnings("serial")
      private volatile java.lang.Object fullPath_ = "";
      /**
       * <code>optional string fullPath = 2;</code>
       * @return Whether the fullPath field is set.
       */
      @java.lang.Override
      public boolean hasFullPath() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional string fullPath = 2;</code>
       * @return The fullPath.
       */
      @java.lang.Override
      public java.lang.String getFullPath() {
        java.lang.Object ref = fullPath_;
        if (ref instanceof java.lang.String) {
          return (java.lang.String) ref;
        } else {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            fullPath_ = s;
          }
          return s;
        }
      }
      /**
       * <code>optional string fullPath = 2;</code>
       * @return The bytes for fullPath.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getFullPathBytes() {
        java.lang.Object ref = fullPath_;
        if (ref instanceof java.lang.String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          fullPath_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        memoizedIsInitialized = 1;
        return true;
      }

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        if (((bitField0_ & 0x00000001) != 0)) {
          output.writeUInt64(1, inodeId_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, fullPath_);
        }
        getUnknownFields().writeTo(output);
      }

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64Size(1, inodeId_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, fullPath_);
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
         return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry) obj;

        if (hasInodeId() != other.hasInodeId()) return false;
        if (hasInodeId()) {
          if (getInodeId()
              != other.getInodeId()) return false;
        }
        if (hasFullPath() != other.hasFullPath()) return false;
        if (hasFullPath()) {
          if (!getFullPath()
              .equals(other.getFullPath())) return false;
        }
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (hasInodeId()) {
          hash = (37 * hash) + INODEID_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getInodeId());
        }
        if (hasFullPath()) {
          hash = (37 * hash) + FULLPATH_FIELD_NUMBER;
          hash = (53 * hash) + getFullPath().hashCode();
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.FilesUnderConstructionSection.FileUnderConstructionEntry}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.FilesUnderConstructionSection.FileUnderConstructionEntry)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntryOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.newBuilder()
        private Builder() {

        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);

        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          inodeId_ = 0L;
          fullPath_ = "";
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry(this);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry result) {
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) != 0)) {
            result.inodeId_ = inodeId_;
            to_bitField0_ |= 0x00000001;
          }
          if (((from_bitField0_ & 0x00000002) != 0)) {
            result.fullPath_ = fullPath_;
            to_bitField0_ |= 0x00000002;
          }
          result.bitField0_ |= to_bitField0_;
        }

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry.getDefaultInstance()) return this;
          if (other.hasInodeId()) {
            setInodeId(other.getInodeId());
          }
          if (other.hasFullPath()) {
            fullPath_ = other.fullPath_;
            bitField0_ |= 0x00000002;
            onChanged();
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }

        @java.lang.Override
        public final boolean isInitialized() {
          return true;
        }

        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 8: {
                  inodeId_ = input.readUInt64();
                  bitField0_ |= 0x00000001;
                  break;
                } // case 8
                case 18: {
                  fullPath_ = input.readBytes();
                  bitField0_ |= 0x00000002;
                  break;
                } // case 18
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
        private int bitField0_;

        private long inodeId_ ;
        /**
         * <code>optional uint64 inodeId = 1;</code>
         * @return Whether the inodeId field is set.
         */
        @java.lang.Override
        public boolean hasInodeId() {
          return ((bitField0_ & 0x00000001) != 0);
        }
        /**
         * <code>optional uint64 inodeId = 1;</code>
         * @return The inodeId.
         */
        @java.lang.Override
        public long getInodeId() {
          return inodeId_;
        }
        /**
         * <code>optional uint64 inodeId = 1;</code>
         * @param value The inodeId to set.
         * @return This builder for chaining.
         */
        public Builder setInodeId(long value) {

          inodeId_ = value;
          bitField0_ |= 0x00000001;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 inodeId = 1;</code>
         * @return This builder for chaining.
         */
        public Builder clearInodeId() {
          bitField0_ = (bitField0_ & ~0x00000001);
          inodeId_ = 0L;
          onChanged();
          return this;
        }

        private java.lang.Object fullPath_ = "";
        /**
         * <code>optional string fullPath = 2;</code>
         * @return Whether the fullPath field is set.
         */
        public boolean hasFullPath() {
          return ((bitField0_ & 0x00000002) != 0);
        }
        /**
         * <code>optional string fullPath = 2;</code>
         * @return The fullPath.
         */
        public java.lang.String getFullPath() {
          java.lang.Object ref = fullPath_;
          if (!(ref instanceof java.lang.String)) {
            org.apache.hadoop.thirdparty.protobuf.ByteString bs =
                (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
            java.lang.String s = bs.toStringUtf8();
            if (bs.isValidUtf8()) {
              fullPath_ = s;
            }
            return s;
          } else {
            return (java.lang.String) ref;
          }
        }
        /**
         * <code>optional string fullPath = 2;</code>
         * @return The bytes for fullPath.
         */
        public org.apache.hadoop.thirdparty.protobuf.ByteString
            getFullPathBytes() {
          java.lang.Object ref = fullPath_;
          if (ref instanceof String) {
            org.apache.hadoop.thirdparty.protobuf.ByteString b = 
                org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                    (java.lang.String) ref);
            fullPath_ = b;
            return b;
          } else {
            return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          }
        }
        /**
         * <code>optional string fullPath = 2;</code>
         * @param value The fullPath to set.
         * @return This builder for chaining.
         */
        public Builder setFullPath(
            java.lang.String value) {
          if (value == null) { throw new NullPointerException(); }
          fullPath_ = value;
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        /**
         * <code>optional string fullPath = 2;</code>
         * @return This builder for chaining.
         */
        public Builder clearFullPath() {
          fullPath_ = getDefaultInstance().getFullPath();
          bitField0_ = (bitField0_ & ~0x00000002);
          onChanged();
          return this;
        }
        /**
         * <code>optional string fullPath = 2;</code>
         * @param value The bytes for fullPath to set.
         * @return This builder for chaining.
         */
        public Builder setFullPathBytes(
            org.apache.hadoop.thirdparty.protobuf.ByteString value) {
          if (value == null) { throw new NullPointerException(); }
          fullPath_ = value;
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.FilesUnderConstructionSection.FileUnderConstructionEntry)
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.FilesUnderConstructionSection.FileUnderConstructionEntry)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<FileUnderConstructionEntry>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<FileUnderConstructionEntry>() {
        @java.lang.Override
        public FileUnderConstructionEntry parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<FileUnderConstructionEntry> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<FileUnderConstructionEntry> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }
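    /*
     * Illustrative sketch only: round-tripping one FileUnderConstructionEntry with the
     * standard length-delimited protobuf helpers, so multiple entries can share a
     * single stream. "out" and "in" are hypothetical streams, and whether the actual
     * fsimage writer serializes entries exactly this way is not asserted here.
     *
     *   entry.writeDelimitedTo(out);                             // length-prefixed write
     *   FileUnderConstructionEntry next =
     *       FileUnderConstructionEntry.parseDelimitedFrom(in);   // reads exactly one entry
     */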

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection) obj;

      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * This section records information about under-construction files for
     * reconstructing the lease map.
     * NAME: FILES_UNDERCONSTRUCTION
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.fsimage.FilesUnderConstructionSection}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.FilesUnderConstructionSection)
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSectionOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection build() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection buildPartial() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection(this);
        onBuilt();
        return result;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection) {
          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection other) {
        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.FilesUnderConstructionSection)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.FilesUnderConstructionSection)
    private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection();
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<FilesUnderConstructionSection>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<FilesUnderConstructionSection>() {
      @java.lang.Override
      public FilesUnderConstructionSection parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<FilesUnderConstructionSection> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<FilesUnderConstructionSection> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

  public interface INodeDirectorySectionOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeDirectorySection)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  }
  /**
   * <pre>
   **
   * This section records the children of each directory
   * NAME: INODE_DIR
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.fsimage.INodeDirectorySection}
   */
  public static final class INodeDirectorySection extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeDirectorySection)
      INodeDirectorySectionOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use INodeDirectorySection.newBuilder() to construct.
    private INodeDirectorySection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private INodeDirectorySection() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new INodeDirectorySection();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.Builder.class);
    }

    public interface DirEntryOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeDirectorySection.DirEntry)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <code>optional uint64 parent = 1;</code>
       * @return Whether the parent field is set.
       */
      boolean hasParent();
      /**
       * <code>optional uint64 parent = 1;</code>
       * @return The parent.
       */
      long getParent();

      /**
       * <pre>
       * children that are not reference nodes
       * </pre>
       *
       * <code>repeated uint64 children = 2 [packed = true];</code>
       * @return A list containing the children.
       */
      java.util.List<java.lang.Long> getChildrenList();
      /**
       * <pre>
       * children that are not reference nodes
       * </pre>
       *
       * <code>repeated uint64 children = 2 [packed = true];</code>
       * @return The count of children.
       */
      int getChildrenCount();
      /**
       * <pre>
       * children that are not reference nodes
       * </pre>
       *
       * <code>repeated uint64 children = 2 [packed = true];</code>
       * @param index The index of the element to return.
       * @return The children at the given index.
       */
      long getChildren(int index);

      /**
       * <pre>
       * children that are reference nodes, each element is a reference node id
       * </pre>
       *
       * <code>repeated uint32 refChildren = 3 [packed = true];</code>
       * @return A list containing the refChildren.
       */
      java.util.List<java.lang.Integer> getRefChildrenList();
      /**
       * <pre>
       * children that are reference nodes, each element is a reference node id
       * </pre>
       *
       * <code>repeated uint32 refChildren = 3 [packed = true];</code>
       * @return The count of refChildren.
       */
      int getRefChildrenCount();
      /**
       * <pre>
       * children that are reference nodes, each element is a reference node id
       * </pre>
       *
       * <code>repeated uint32 refChildren = 3 [packed = true];</code>
       * @param index The index of the element to return.
       * @return The refChildren at the given index.
       */
      int getRefChildren(int index);
    }
    /**
     * <pre>
     **
     * A single DirEntry needs to fit in the default PB max message size of
     * 64MB. Please be careful when adding more fields to a DirEntry!
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.fsimage.INodeDirectorySection.DirEntry}
     */
    public static final class DirEntry extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeDirectorySection.DirEntry)
        DirEntryOrBuilder {
    private static final long serialVersionUID = 0L;
      // Use DirEntry.newBuilder() to construct.
      private DirEntry(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private DirEntry() {
        children_ = emptyLongList();
        refChildren_ = emptyIntList();
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new DirEntry();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry.Builder.class);
      }

      private int bitField0_;
      public static final int PARENT_FIELD_NUMBER = 1;
      private long parent_ = 0L;
      /**
       * <code>optional uint64 parent = 1;</code>
       * @return Whether the parent field is set.
       */
      @java.lang.Override
      public boolean hasParent() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional uint64 parent = 1;</code>
       * @return The parent.
       */
      @java.lang.Override
      public long getParent() {
        return parent_;
      }

      public static final int CHILDREN_FIELD_NUMBER = 2;
      @SuppressWarnings("serial")
      private org.apache.hadoop.thirdparty.protobuf.Internal.LongList children_ =
          emptyLongList();
      /**
       * <pre>
       * children that are not reference nodes
       * </pre>
       *
       * <code>repeated uint64 children = 2 [packed = true];</code>
       * @return A list containing the children.
       */
      @java.lang.Override
      public java.util.List<java.lang.Long>
          getChildrenList() {
        return children_;
      }
      /**
       * <pre>
       * children that are not reference nodes
       * </pre>
       *
       * <code>repeated uint64 children = 2 [packed = true];</code>
       * @return The count of children.
       */
      public int getChildrenCount() {
        return children_.size();
      }
      /**
       * <pre>
       * children that are not reference nodes
       * </pre>
       *
       * <code>repeated uint64 children = 2 [packed = true];</code>
       * @param index The index of the element to return.
       * @return The children at the given index.
       */
      public long getChildren(int index) {
        return children_.getLong(index);
      }
      private int childrenMemoizedSerializedSize = -1;

      public static final int REFCHILDREN_FIELD_NUMBER = 3;
      @SuppressWarnings("serial")
      private org.apache.hadoop.thirdparty.protobuf.Internal.IntList refChildren_ =
          emptyIntList();
      /**
       * <pre>
       * children that are reference nodes, each element is a reference node id
       * </pre>
       *
       * <code>repeated uint32 refChildren = 3 [packed = true];</code>
       * @return A list containing the refChildren.
       */
      @java.lang.Override
      public java.util.List<java.lang.Integer>
          getRefChildrenList() {
        return refChildren_;
      }
      /**
       * <pre>
       * children that are reference nodes, each element is a reference node id
       * </pre>
       *
       * <code>repeated uint32 refChildren = 3 [packed = true];</code>
       * @return The count of refChildren.
       */
      public int getRefChildrenCount() {
        return refChildren_.size();
      }
      /**
       * <pre>
       * children that are reference nodes, each element is a reference node id
       * </pre>
       *
       * <code>repeated uint32 refChildren = 3 [packed = true];</code>
       * @param index The index of the element to return.
       * @return The refChildren at the given index.
       */
      public int getRefChildren(int index) {
        return refChildren_.getInt(index);
      }
      private int refChildrenMemoizedSerializedSize = -1;

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        memoizedIsInitialized = 1;
        return true;
      }

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        getSerializedSize();
        if (((bitField0_ & 0x00000001) != 0)) {
          output.writeUInt64(1, parent_);
        }
        if (getChildrenList().size() > 0) {
          output.writeUInt32NoTag(18);
          output.writeUInt32NoTag(childrenMemoizedSerializedSize);
        }
        for (int i = 0; i < children_.size(); i++) {
          output.writeUInt64NoTag(children_.getLong(i));
        }
        if (getRefChildrenList().size() > 0) {
          output.writeUInt32NoTag(26);
          output.writeUInt32NoTag(refChildrenMemoizedSerializedSize);
        }
        for (int i = 0; i < refChildren_.size(); i++) {
          output.writeUInt32NoTag(refChildren_.getInt(i));
        }
        getUnknownFields().writeTo(output);
      }

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64Size(1, parent_);
        }
        {
          int dataSize = 0;
          for (int i = 0; i < children_.size(); i++) {
            dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
              .computeUInt64SizeNoTag(children_.getLong(i));
          }
          size += dataSize;
          if (!getChildrenList().isEmpty()) {
            size += 1;
            size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
                .computeInt32SizeNoTag(dataSize);
          }
          childrenMemoizedSerializedSize = dataSize;
        }
        {
          int dataSize = 0;
          for (int i = 0; i < refChildren_.size(); i++) {
            dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
              .computeUInt32SizeNoTag(refChildren_.getInt(i));
          }
          size += dataSize;
          if (!getRefChildrenList().isEmpty()) {
            size += 1;
            size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
                .computeInt32SizeNoTag(dataSize);
          }
          refChildrenMemoizedSerializedSize = dataSize;
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
         return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry) obj;

        if (hasParent() != other.hasParent()) return false;
        if (hasParent()) {
          if (getParent()
              != other.getParent()) return false;
        }
        if (!getChildrenList()
            .equals(other.getChildrenList())) return false;
        if (!getRefChildrenList()
            .equals(other.getRefChildrenList())) return false;
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (hasParent()) {
          hash = (37 * hash) + PARENT_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getParent());
        }
        if (getChildrenCount() > 0) {
          hash = (37 * hash) + CHILDREN_FIELD_NUMBER;
          hash = (53 * hash) + getChildrenList().hashCode();
        }
        if (getRefChildrenCount() > 0) {
          hash = (37 * hash) + REFCHILDREN_FIELD_NUMBER;
          hash = (53 * hash) + getRefChildrenList().hashCode();
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * <pre>
       **
       * A single DirEntry needs to fit in the default PB max message size of
       * 64MB. Please be careful when adding more fields to a DirEntry!
       * </pre>
       *
       * Protobuf type {@code hadoop.hdfs.fsimage.INodeDirectorySection.DirEntry}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeDirectorySection.DirEntry)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntryOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry.newBuilder()
        private Builder() {

        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);

        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          parent_ = 0L;
          children_ = emptyLongList();
          refChildren_ = emptyIntList();
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry(this);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry result) {
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) != 0)) {
            result.parent_ = parent_;
            to_bitField0_ |= 0x00000001;
          }
          if (((from_bitField0_ & 0x00000002) != 0)) {
            children_.makeImmutable();
            result.children_ = children_;
          }
          if (((from_bitField0_ & 0x00000004) != 0)) {
            refChildren_.makeImmutable();
            result.refChildren_ = refChildren_;
          }
          result.bitField0_ |= to_bitField0_;
        }

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry.getDefaultInstance()) return this;
          if (other.hasParent()) {
            setParent(other.getParent());
          }
          if (!other.children_.isEmpty()) {
            if (children_.isEmpty()) {
              children_ = other.children_;
              children_.makeImmutable();
              bitField0_ |= 0x00000002;
            } else {
              ensureChildrenIsMutable();
              children_.addAll(other.children_);
            }
            onChanged();
          }
          if (!other.refChildren_.isEmpty()) {
            if (refChildren_.isEmpty()) {
              refChildren_ = other.refChildren_;
              refChildren_.makeImmutable();
              bitField0_ |= 0x00000004;
            } else {
              ensureRefChildrenIsMutable();
              refChildren_.addAll(other.refChildren_);
            }
            onChanged();
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }

        @java.lang.Override
        public final boolean isInitialized() {
          return true;
        }

        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 8: {
                  parent_ = input.readUInt64();
                  bitField0_ |= 0x00000001;
                  break;
                } // case 8
                case 16: {
                  long v = input.readUInt64();
                  ensureChildrenIsMutable();
                  children_.addLong(v);
                  break;
                } // case 16
                case 18: {
                  int length = input.readRawVarint32();
                  int limit = input.pushLimit(length);
                  ensureChildrenIsMutable();
                  while (input.getBytesUntilLimit() > 0) {
                    children_.addLong(input.readUInt64());
                  }
                  input.popLimit(limit);
                  break;
                } // case 18
                case 24: {
                  int v = input.readUInt32();
                  ensureRefChildrenIsMutable();
                  refChildren_.addInt(v);
                  break;
                } // case 24
                case 26: {
                  int length = input.readRawVarint32();
                  int limit = input.pushLimit(length);
                  ensureRefChildrenIsMutable();
                  while (input.getBytesUntilLimit() > 0) {
                    refChildren_.addInt(input.readUInt32());
                  }
                  input.popLimit(limit);
                  break;
                } // case 26
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
        private int bitField0_;

        private long parent_ ;
        /**
         * <code>optional uint64 parent = 1;</code>
         * @return Whether the parent field is set.
         */
        @java.lang.Override
        public boolean hasParent() {
          return ((bitField0_ & 0x00000001) != 0);
        }
        /**
         * <code>optional uint64 parent = 1;</code>
         * @return The parent.
         */
        @java.lang.Override
        public long getParent() {
          return parent_;
        }
        /**
         * <code>optional uint64 parent = 1;</code>
         * @param value The parent to set.
         * @return This builder for chaining.
         */
        public Builder setParent(long value) {

          parent_ = value;
          bitField0_ |= 0x00000001;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 parent = 1;</code>
         * @return This builder for chaining.
         */
        public Builder clearParent() {
          bitField0_ = (bitField0_ & ~0x00000001);
          parent_ = 0L;
          onChanged();
          return this;
        }

        private org.apache.hadoop.thirdparty.protobuf.Internal.LongList children_ = emptyLongList();
        private void ensureChildrenIsMutable() {
          if (!children_.isModifiable()) {
            children_ = makeMutableCopy(children_);
          }
          bitField0_ |= 0x00000002;
        }
        /**
         * <pre>
         * children that are not reference nodes
         * </pre>
         *
         * <code>repeated uint64 children = 2 [packed = true];</code>
         * @return A list containing the children.
         */
        public java.util.List<java.lang.Long>
            getChildrenList() {
          children_.makeImmutable();
          return children_;
        }
        /**
         * <pre>
         * children that are not reference nodes
         * </pre>
         *
         * <code>repeated uint64 children = 2 [packed = true];</code>
         * @return The count of children.
         */
        public int getChildrenCount() {
          return children_.size();
        }
        /**
         * <pre>
         * children that are not reference nodes
         * </pre>
         *
         * <code>repeated uint64 children = 2 [packed = true];</code>
         * @param index The index of the element to return.
         * @return The children at the given index.
         */
        public long getChildren(int index) {
          return children_.getLong(index);
        }
        /**
         * <pre>
         * children that are not reference nodes
         * </pre>
         *
         * <code>repeated uint64 children = 2 [packed = true];</code>
         * @param index The index to set the value at.
         * @param value The children to set.
         * @return This builder for chaining.
         */
        public Builder setChildren(
            int index, long value) {

          ensureChildrenIsMutable();
          children_.setLong(index, value);
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        /**
         * <pre>
         * children that are not reference nodes
         * </pre>
         *
         * <code>repeated uint64 children = 2 [packed = true];</code>
         * @param value The children to add.
         * @return This builder for chaining.
         */
        public Builder addChildren(long value) {

          ensureChildrenIsMutable();
          children_.addLong(value);
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        /**
         * <pre>
         * children that are not reference nodes
         * </pre>
         *
         * <code>repeated uint64 children = 2 [packed = true];</code>
         * @param values The children to add.
         * @return This builder for chaining.
         */
        public Builder addAllChildren(
            java.lang.Iterable<? extends java.lang.Long> values) {
          ensureChildrenIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, children_);
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        /**
         * <pre>
         * children that are not reference nodes
         * </pre>
         *
         * <code>repeated uint64 children = 2 [packed = true];</code>
         * @return This builder for chaining.
         */
        public Builder clearChildren() {
          children_ = emptyLongList();
          bitField0_ = (bitField0_ & ~0x00000002);
          onChanged();
          return this;
        }

        private org.apache.hadoop.thirdparty.protobuf.Internal.IntList refChildren_ = emptyIntList();
        private void ensureRefChildrenIsMutable() {
          if (!refChildren_.isModifiable()) {
            refChildren_ = makeMutableCopy(refChildren_);
          }
          bitField0_ |= 0x00000004;
        }
        /**
         * <pre>
         * children that are reference nodes, each element is a reference node id
         * </pre>
         *
         * <code>repeated uint32 refChildren = 3 [packed = true];</code>
         * @return A list containing the refChildren.
         */
        public java.util.List<java.lang.Integer>
            getRefChildrenList() {
          refChildren_.makeImmutable();
          return refChildren_;
        }
        /**
         * <pre>
         * children that are reference nodes, each element is a reference node id
         * </pre>
         *
         * <code>repeated uint32 refChildren = 3 [packed = true];</code>
         * @return The count of refChildren.
         */
        public int getRefChildrenCount() {
          return refChildren_.size();
        }
        /**
         * <pre>
         * children that are reference nodes, each element is a reference node id
         * </pre>
         *
         * <code>repeated uint32 refChildren = 3 [packed = true];</code>
         * @param index The index of the element to return.
         * @return The refChildren at the given index.
         */
        public int getRefChildren(int index) {
          return refChildren_.getInt(index);
        }
        /**
         * <pre>
         * children that are reference nodes, each element is a reference node id
         * </pre>
         *
         * <code>repeated uint32 refChildren = 3 [packed = true];</code>
         * @param index The index to set the value at.
         * @param value The refChildren to set.
         * @return This builder for chaining.
         */
        public Builder setRefChildren(
            int index, int value) {

          ensureRefChildrenIsMutable();
          refChildren_.setInt(index, value);
          bitField0_ |= 0x00000004;
          onChanged();
          return this;
        }
        /**
         * <pre>
         * children that are reference nodes, each element is a reference node id
         * </pre>
         *
         * <code>repeated uint32 refChildren = 3 [packed = true];</code>
         * @param value The refChildren to add.
         * @return This builder for chaining.
         */
        public Builder addRefChildren(int value) {

          ensureRefChildrenIsMutable();
          refChildren_.addInt(value);
          bitField0_ |= 0x00000004;
          onChanged();
          return this;
        }
        /**
         * <pre>
         * children that are reference nodes, each element is a reference node id
         * </pre>
         *
         * <code>repeated uint32 refChildren = 3 [packed = true];</code>
         * @param values The refChildren to add.
         * @return This builder for chaining.
         */
        public Builder addAllRefChildren(
            java.lang.Iterable<? extends java.lang.Integer> values) {
          ensureRefChildrenIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, refChildren_);
          bitField0_ |= 0x00000004;
          onChanged();
          return this;
        }
        /**
         * <pre>
         * children that are reference nodes, each element is a reference node id
         * </pre>
         *
         * <code>repeated uint32 refChildren = 3 [packed = true];</code>
         * @return This builder for chaining.
         */
        public Builder clearRefChildren() {
          refChildren_ = emptyIntList();
          bitField0_ = (bitField0_ & ~0x00000004);
          onChanged();
          return this;
        }
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeDirectorySection.DirEntry)
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeDirectorySection.DirEntry)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DirEntry>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DirEntry>() {
        @java.lang.Override
        public DirEntry parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<DirEntry> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<DirEntry> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.DirEntry getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }
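
    /**
     * Illustrative sketch only, not part of the protoc-generated API: shows
     * how a caller might assemble and round-trip a DirEntry with the builder
     * methods defined above. The inode ids used here are hypothetical example
     * values, not values taken from any real fsimage.
     */
    private static DirEntry exampleDirEntryRoundTrip()
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      DirEntry entry = DirEntry.newBuilder()
          .setParent(16385L)       // parent directory inode id (example value)
          .addChildren(16386L)     // child inode that is not a reference node
          .addChildren(16387L)
          .addRefChildren(3)       // reference-node child; see the refChildren field comment
          .build();
      // Serialize to bytes and parse back with the generated parser.
      byte[] bytes = entry.toByteArray();
      return DirEntry.parseFrom(bytes);
    }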

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection) obj;

      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * This section records the children of each directory
     * NAME: INODE_DIR
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.fsimage.INodeDirectorySection}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeDirectorySection)
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySectionOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection build() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection buildPartial() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection(this);
        onBuilt();
        return result;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection) {
          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection other) {
        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeDirectorySection)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeDirectorySection)
    private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection();
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<INodeDirectorySection>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<INodeDirectorySection>() {
      @java.lang.Override
      public INodeDirectorySection parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<INodeDirectorySection> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<INodeDirectorySection> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
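
  /*
   * Illustrative usage sketch, not part of the generated code: as the empty
   * builder above shows, the INodeDirectorySection container message declares
   * no fields of its own, so it round-trips through its builder and parser
   * trivially. The variable names below are assumptions made for this
   * example, and parseFrom throws InvalidProtocolBufferException.
   *
   *   FsImageProto.INodeDirectorySection section =
   *       FsImageProto.INodeDirectorySection.newBuilder().build();
   *   byte[] bytes = section.toByteArray();
   *   FsImageProto.INodeDirectorySection parsed =
   *       FsImageProto.INodeDirectorySection.parser().parseFrom(bytes);
   */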

  public interface INodeReferenceSectionOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeReferenceSection)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  }
  /**
   * Protobuf type {@code hadoop.hdfs.fsimage.INodeReferenceSection}
   */
  public static final class INodeReferenceSection extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeReferenceSection)
      INodeReferenceSectionOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use INodeReferenceSection.newBuilder() to construct.
    private INodeReferenceSection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private INodeReferenceSection() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new INodeReferenceSection();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.Builder.class);
    }

    public interface INodeReferenceOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.INodeReferenceSection.INodeReference)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <pre>
       * id of the referred inode
       * </pre>
       *
       * <code>optional uint64 referredId = 1;</code>
       * @return Whether the referredId field is set.
       */
      boolean hasReferredId();
      /**
       * <pre>
       * id of the referred inode
       * </pre>
       *
       * <code>optional uint64 referredId = 1;</code>
       * @return The referredId.
       */
      long getReferredId();

      /**
       * <pre>
       * local name recorded in WithName
       * </pre>
       *
       * <code>optional bytes name = 2;</code>
       * @return Whether the name field is set.
       */
      boolean hasName();
      /**
       * <pre>
       * local name recorded in WithName
       * </pre>
       *
       * <code>optional bytes name = 2;</code>
       * @return The name.
       */
      org.apache.hadoop.thirdparty.protobuf.ByteString getName();

      /**
       * <pre>
       * recorded in DstReference
       * </pre>
       *
       * <code>optional uint32 dstSnapshotId = 3;</code>
       * @return Whether the dstSnapshotId field is set.
       */
      boolean hasDstSnapshotId();
      /**
       * <pre>
       * recorded in DstReference
       * </pre>
       *
       * <code>optional uint32 dstSnapshotId = 3;</code>
       * @return The dstSnapshotId.
       */
      int getDstSnapshotId();

      /**
       * <pre>
       * recorded in WithName
       * </pre>
       *
       * <code>optional uint32 lastSnapshotId = 4;</code>
       * @return Whether the lastSnapshotId field is set.
       */
      boolean hasLastSnapshotId();
      /**
       * <pre>
       * recorded in WithName
       * </pre>
       *
       * <code>optional uint32 lastSnapshotId = 4;</code>
       * @return The lastSnapshotId.
       */
      int getLastSnapshotId();
    }
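
    /*
     * Reading note, not generated code: all four fields above are optional,
     * and the plain getters fall back to the proto2 defaults (0 for the
     * numeric fields, an empty ByteString for name) when a field is absent,
     * so callers that need to distinguish "unset" from "zero" consult the
     * corresponding has* accessor first. "ref" below stands for an
     * already-parsed INodeReference and is an assumption of this sketch.
     *
     *   if (ref.hasDstSnapshotId()) {
     *     int dstSnapshotId = ref.getDstSnapshotId();  // only meaningful when the field was set
     *   }
     */
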
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.INodeReferenceSection.INodeReference}
     */
    public static final class INodeReference extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.INodeReferenceSection.INodeReference)
        INodeReferenceOrBuilder {
      private static final long serialVersionUID = 0L;
      // Use INodeReference.newBuilder() to construct.
      private INodeReference(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private INodeReference() {
        name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new INodeReference();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference.Builder.class);
      }

      private int bitField0_;
      public static final int REFERREDID_FIELD_NUMBER = 1;
      private long referredId_ = 0L;
      /**
       * <pre>
       * id of the referred inode
       * </pre>
       *
       * <code>optional uint64 referredId = 1;</code>
       * @return Whether the referredId field is set.
       */
      @java.lang.Override
      public boolean hasReferredId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <pre>
       * id of the referred inode
       * </pre>
       *
       * <code>optional uint64 referredId = 1;</code>
       * @return The referredId.
       */
      @java.lang.Override
      public long getReferredId() {
        return referredId_;
      }

      public static final int NAME_FIELD_NUMBER = 2;
      private org.apache.hadoop.thirdparty.protobuf.ByteString name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <pre>
       * local name recorded in WithName
       * </pre>
       *
       * <code>optional bytes name = 2;</code>
       * @return Whether the name field is set.
       */
      @java.lang.Override
      public boolean hasName() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       * local name recorded in WithName
       * </pre>
       *
       * <code>optional bytes name = 2;</code>
       * @return The name.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getName() {
        return name_;
      }

      public static final int DSTSNAPSHOTID_FIELD_NUMBER = 3;
      private int dstSnapshotId_ = 0;
      /**
       * <pre>
       * recorded in DstReference
       * </pre>
       *
       * <code>optional uint32 dstSnapshotId = 3;</code>
       * @return Whether the dstSnapshotId field is set.
       */
      @java.lang.Override
      public boolean hasDstSnapshotId() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <pre>
       * recorded in DstReference
       * </pre>
       *
       * <code>optional uint32 dstSnapshotId = 3;</code>
       * @return The dstSnapshotId.
       */
      @java.lang.Override
      public int getDstSnapshotId() {
        return dstSnapshotId_;
      }

      public static final int LASTSNAPSHOTID_FIELD_NUMBER = 4;
      private int lastSnapshotId_ = 0;
      /**
       * <pre>
       * recorded in WithName
       * </pre>
       *
       * <code>optional uint32 lastSnapshotId = 4;</code>
       * @return Whether the lastSnapshotId field is set.
       */
      @java.lang.Override
      public boolean hasLastSnapshotId() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <pre>
       * recorded in WithName
       * </pre>
       *
       * <code>optional uint32 lastSnapshotId = 4;</code>
       * @return The lastSnapshotId.
       */
      @java.lang.Override
      public int getLastSnapshotId() {
        return lastSnapshotId_;
      }

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        memoizedIsInitialized = 1;
        return true;
      }

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        if (((bitField0_ & 0x00000001) != 0)) {
          output.writeUInt64(1, referredId_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          output.writeBytes(2, name_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          output.writeUInt32(3, dstSnapshotId_);
        }
        if (((bitField0_ & 0x00000008) != 0)) {
          output.writeUInt32(4, lastSnapshotId_);
        }
        getUnknownFields().writeTo(output);
      }

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64Size(1, referredId_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeBytesSize(2, name_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt32Size(3, dstSnapshotId_);
        }
        if (((bitField0_ & 0x00000008) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt32Size(4, lastSnapshotId_);
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
          return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference) obj;

        if (hasReferredId() != other.hasReferredId()) return false;
        if (hasReferredId()) {
          if (getReferredId()
              != other.getReferredId()) return false;
        }
        if (hasName() != other.hasName()) return false;
        if (hasName()) {
          if (!getName()
              .equals(other.getName())) return false;
        }
        if (hasDstSnapshotId() != other.hasDstSnapshotId()) return false;
        if (hasDstSnapshotId()) {
          if (getDstSnapshotId()
              != other.getDstSnapshotId()) return false;
        }
        if (hasLastSnapshotId() != other.hasLastSnapshotId()) return false;
        if (hasLastSnapshotId()) {
          if (getLastSnapshotId()
              != other.getLastSnapshotId()) return false;
        }
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (hasReferredId()) {
          hash = (37 * hash) + REFERREDID_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getReferredId());
        }
        if (hasName()) {
          hash = (37 * hash) + NAME_FIELD_NUMBER;
          hash = (53 * hash) + getName().hashCode();
        }
        if (hasDstSnapshotId()) {
          hash = (37 * hash) + DSTSNAPSHOTID_FIELD_NUMBER;
          hash = (53 * hash) + getDstSnapshotId();
        }
        if (hasLastSnapshotId()) {
          hash = (37 * hash) + LASTSNAPSHOTID_FIELD_NUMBER;
          hash = (53 * hash) + getLastSnapshotId();
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.INodeReferenceSection.INodeReference}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeReferenceSection.INodeReference)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReferenceOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference.newBuilder()
        private Builder() {

        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);

        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          referredId_ = 0L;
          name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
          dstSnapshotId_ = 0;
          lastSnapshotId_ = 0;
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference(this);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference result) {
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) != 0)) {
            result.referredId_ = referredId_;
            to_bitField0_ |= 0x00000001;
          }
          if (((from_bitField0_ & 0x00000002) != 0)) {
            result.name_ = name_;
            to_bitField0_ |= 0x00000002;
          }
          if (((from_bitField0_ & 0x00000004) != 0)) {
            result.dstSnapshotId_ = dstSnapshotId_;
            to_bitField0_ |= 0x00000004;
          }
          if (((from_bitField0_ & 0x00000008) != 0)) {
            result.lastSnapshotId_ = lastSnapshotId_;
            to_bitField0_ |= 0x00000008;
          }
          result.bitField0_ |= to_bitField0_;
        }

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference.getDefaultInstance()) return this;
          if (other.hasReferredId()) {
            setReferredId(other.getReferredId());
          }
          if (other.hasName()) {
            setName(other.getName());
          }
          if (other.hasDstSnapshotId()) {
            setDstSnapshotId(other.getDstSnapshotId());
          }
          if (other.hasLastSnapshotId()) {
            setLastSnapshotId(other.getLastSnapshotId());
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }

        @java.lang.Override
        public final boolean isInitialized() {
          return true;
        }

        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 8: {
                  referredId_ = input.readUInt64();
                  bitField0_ |= 0x00000001;
                  break;
                } // case 8
                case 18: {
                  name_ = input.readBytes();
                  bitField0_ |= 0x00000002;
                  break;
                } // case 18
                case 24: {
                  dstSnapshotId_ = input.readUInt32();
                  bitField0_ |= 0x00000004;
                  break;
                } // case 24
                case 32: {
                  lastSnapshotId_ = input.readUInt32();
                  bitField0_ |= 0x00000008;
                  break;
                } // case 32
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
        private int bitField0_;

        private long referredId_ ;
        /**
         * <pre>
         * id of the referred inode
         * </pre>
         *
         * <code>optional uint64 referredId = 1;</code>
         * @return Whether the referredId field is set.
         */
        @java.lang.Override
        public boolean hasReferredId() {
          return ((bitField0_ & 0x00000001) != 0);
        }
        /**
         * <pre>
         * id of the referred inode
         * </pre>
         *
         * <code>optional uint64 referredId = 1;</code>
         * @return The referredId.
         */
        @java.lang.Override
        public long getReferredId() {
          return referredId_;
        }
        /**
         * <pre>
         * id of the referred inode
         * </pre>
         *
         * <code>optional uint64 referredId = 1;</code>
         * @param value The referredId to set.
         * @return This builder for chaining.
         */
        public Builder setReferredId(long value) {
          referredId_ = value;
          bitField0_ |= 0x00000001;
          onChanged();
          return this;
        }
        /**
         * <pre>
         * id of the referred inode
         * </pre>
         *
         * <code>optional uint64 referredId = 1;</code>
         * @return This builder for chaining.
         */
        public Builder clearReferredId() {
          bitField0_ = (bitField0_ & ~0x00000001);
          referredId_ = 0L;
          onChanged();
          return this;
        }

        private org.apache.hadoop.thirdparty.protobuf.ByteString name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        /**
         * <pre>
         * local name recorded in WithName
         * </pre>
         *
         * <code>optional bytes name = 2;</code>
         * @return Whether the name field is set.
         */
        @java.lang.Override
        public boolean hasName() {
          return ((bitField0_ & 0x00000002) != 0);
        }
        /**
         * <pre>
         * local name recorded in WithName
         * </pre>
         *
         * <code>optional bytes name = 2;</code>
         * @return The name.
         */
        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.ByteString getName() {
          return name_;
        }
        /**
         * <pre>
         * local name recorded in WithName
         * </pre>
         *
         * <code>optional bytes name = 2;</code>
         * @param value The name to set.
         * @return This builder for chaining.
         */
        public Builder setName(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
          if (value == null) { throw new NullPointerException(); }
          name_ = value;
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        /**
         * <pre>
         * local name recorded in WithName
         * </pre>
         *
         * <code>optional bytes name = 2;</code>
         * @return This builder for chaining.
         */
        public Builder clearName() {
          bitField0_ = (bitField0_ & ~0x00000002);
          name_ = getDefaultInstance().getName();
          onChanged();
          return this;
        }

        private int dstSnapshotId_ ;
        /**
         * <pre>
         * recorded in DstReference
         * </pre>
         *
         * <code>optional uint32 dstSnapshotId = 3;</code>
         * @return Whether the dstSnapshotId field is set.
         */
        @java.lang.Override
        public boolean hasDstSnapshotId() {
          return ((bitField0_ & 0x00000004) != 0);
        }
        /**
         * <pre>
         * recorded in DstReference
         * </pre>
         *
         * <code>optional uint32 dstSnapshotId = 3;</code>
         * @return The dstSnapshotId.
         */
        @java.lang.Override
        public int getDstSnapshotId() {
          return dstSnapshotId_;
        }
        /**
         * <pre>
         * recorded in DstReference
         * </pre>
         *
         * <code>optional uint32 dstSnapshotId = 3;</code>
         * @param value The dstSnapshotId to set.
         * @return This builder for chaining.
         */
        public Builder setDstSnapshotId(int value) {
          dstSnapshotId_ = value;
          bitField0_ |= 0x00000004;
          onChanged();
          return this;
        }
        /**
         * <pre>
         * recorded in DstReference
         * </pre>
         *
         * <code>optional uint32 dstSnapshotId = 3;</code>
         * @return This builder for chaining.
         */
        public Builder clearDstSnapshotId() {
          bitField0_ = (bitField0_ & ~0x00000004);
          dstSnapshotId_ = 0;
          onChanged();
          return this;
        }

        private int lastSnapshotId_ ;
        /**
         * <pre>
         * recorded in WithName
         * </pre>
         *
         * <code>optional uint32 lastSnapshotId = 4;</code>
         * @return Whether the lastSnapshotId field is set.
         */
        @java.lang.Override
        public boolean hasLastSnapshotId() {
          return ((bitField0_ & 0x00000008) != 0);
        }
        /**
         * <pre>
         * recorded in WithName
         * </pre>
         *
         * <code>optional uint32 lastSnapshotId = 4;</code>
         * @return The lastSnapshotId.
         */
        @java.lang.Override
        public int getLastSnapshotId() {
          return lastSnapshotId_;
        }
        /**
         * <pre>
         * recorded in WithName
         * </pre>
         *
         * <code>optional uint32 lastSnapshotId = 4;</code>
         * @param value The lastSnapshotId to set.
         * @return This builder for chaining.
         */
        public Builder setLastSnapshotId(int value) {
          lastSnapshotId_ = value;
          bitField0_ |= 0x00000008;
          onChanged();
          return this;
        }
        /**
         * <pre>
         * recorded in WithName
         * </pre>
         *
         * <code>optional uint32 lastSnapshotId = 4;</code>
         * @return This builder for chaining.
         */
        public Builder clearLastSnapshotId() {
          bitField0_ = (bitField0_ & ~0x00000008);
          lastSnapshotId_ = 0;
          onChanged();
          return this;
        }
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeReferenceSection.INodeReference)
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeReferenceSection.INodeReference)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<INodeReference>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<INodeReference>() {
        @java.lang.Override
        public INodeReference parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<INodeReference> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<INodeReference> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.INodeReference getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }
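
    /*
     * Illustrative sketch, not part of the generated code: building an
     * INodeReference with the setters above, writing it length-delimited, and
     * reading it back with parseDelimitedFrom. The stream variables and field
     * values are assumptions of this example; writeDelimitedTo is the
     * standard protobuf MessageLite method, and both calls throw
     * java.io.IOException.
     *
     *   FsImageProto.INodeReferenceSection.INodeReference ref =
     *       FsImageProto.INodeReferenceSection.INodeReference.newBuilder()
     *           .setReferredId(16385L)
     *           .setName(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("dir1"))
     *           .setLastSnapshotId(2)
     *           .build();
     *   ref.writeDelimitedTo(out);    // out: a java.io.OutputStream
     *   FsImageProto.INodeReferenceSection.INodeReference parsed =
     *       FsImageProto.INodeReferenceSection.INodeReference.parseDelimitedFrom(in);  // in: a java.io.InputStream
     */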

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection) obj;

      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.INodeReferenceSection}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.INodeReferenceSection)
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSectionOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection build() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection buildPartial() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection(this);
        onBuilt();
        return result;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection) {
          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection other) {
        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.INodeReferenceSection)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.INodeReferenceSection)
    private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection();
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<INodeReferenceSection>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<INodeReferenceSection>() {
      @java.lang.Override
      public INodeReferenceSection parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<INodeReferenceSection> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<INodeReferenceSection> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

  public interface SnapshotSectionOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.SnapshotSection)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>optional uint32 snapshotCounter = 1;</code>
     * @return Whether the snapshotCounter field is set.
     */
    boolean hasSnapshotCounter();
    /**
     * <code>optional uint32 snapshotCounter = 1;</code>
     * @return The snapshotCounter.
     */
    int getSnapshotCounter();

    /**
     * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
     * @return A list containing the snapshottableDir.
     */
    java.util.List<java.lang.Long> getSnapshottableDirList();
    /**
     * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
     * @return The count of snapshottableDir.
     */
    int getSnapshottableDirCount();
    /**
     * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
     * @param index The index of the element to return.
     * @return The snapshottableDir at the given index.
     */
    long getSnapshottableDir(int index);

    /**
     * <pre>
     * total number of snapshots
     * </pre>
     *
     * <code>optional uint32 numSnapshots = 3;</code>
     * @return Whether the numSnapshots field is set.
     */
    boolean hasNumSnapshots();
    /**
     * <pre>
     * total number of snapshots
     * </pre>
     *
     * <code>optional uint32 numSnapshots = 3;</code>
     * @return The numSnapshots.
     */
    int getNumSnapshots();
  }
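
  /*
   * Reading sketch, not part of the generated code: walking the optional
   * counters and the packed repeated snapshottableDir field through the
   * accessors declared above. "section" stands for an already-parsed
   * SnapshotSection and is an assumption of this example.
   *
   *   int snapshotCounter = section.hasSnapshotCounter() ? section.getSnapshotCounter() : 0;
   *   int numSnapshots = section.hasNumSnapshots() ? section.getNumSnapshots() : 0;
   *   for (int i = 0; i < section.getSnapshottableDirCount(); i++) {
   *     long snapshottableDirId = section.getSnapshottableDir(i);   // one snapshottable directory entry
   *   }
   */
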
  /**
   * <pre>
   **
   * This section records the information about snapshot
   * NAME: SNAPSHOT
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotSection}
   */
  public static final class SnapshotSection extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.SnapshotSection)
      SnapshotSectionOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use SnapshotSection.newBuilder() to construct.
    private SnapshotSection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private SnapshotSection() {
      snapshottableDir_ = emptyLongList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new SnapshotSection();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Builder.class);
    }

    public interface SnapshotOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.SnapshotSection.Snapshot)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <code>optional uint32 snapshotId = 1;</code>
       * @return Whether the snapshotId field is set.
       */
      boolean hasSnapshotId();
      /**
       * <code>optional uint32 snapshotId = 1;</code>
       * @return The snapshotId.
       */
      int getSnapshotId();

      /**
       * <pre>
       * Snapshot root
       * </pre>
       *
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
       * @return Whether the root field is set.
       */
      boolean hasRoot();
      /**
       * <pre>
       * Snapshot root
       * </pre>
       *
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
       * @return The root.
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode getRoot();
      /**
       * <pre>
       * Snapshot root
       * </pre>
       *
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeOrBuilder getRootOrBuilder();
    }
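    /*
     * Usage sketch (illustrative comment, not part of the generated code): the
     * has/get pair above is the read pattern for the optional message-typed
     * "root" field. A minimal example, assuming "snapshot" is a parsed Snapshot:
     *
     *   if (snapshot.hasRoot()) {
     *     FsImageProto.INodeSection.INode root = snapshot.getRoot();
     *   }
     *   // getRoot() returns INode.getDefaultInstance() when the field is unset
     */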
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotSection.Snapshot}
     */
    public static final class Snapshot extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.SnapshotSection.Snapshot)
        SnapshotOrBuilder {
    private static final long serialVersionUID = 0L;
      // Use Snapshot.newBuilder() to construct.
      private Snapshot(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private Snapshot() {
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new Snapshot();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot.Builder.class);
      }

      private int bitField0_;
      public static final int SNAPSHOTID_FIELD_NUMBER = 1;
      private int snapshotId_ = 0;
      /**
       * <code>optional uint32 snapshotId = 1;</code>
       * @return Whether the snapshotId field is set.
       */
      @java.lang.Override
      public boolean hasSnapshotId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional uint32 snapshotId = 1;</code>
       * @return The snapshotId.
       */
      @java.lang.Override
      public int getSnapshotId() {
        return snapshotId_;
      }

      public static final int ROOT_FIELD_NUMBER = 2;
      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode root_;
      /**
       * <pre>
       * Snapshot root
       * </pre>
       *
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
       * @return Whether the root field is set.
       */
      @java.lang.Override
      public boolean hasRoot() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       * Snapshot root
       * </pre>
       *
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
       * @return The root.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode getRoot() {
        return root_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance() : root_;
      }
      /**
       * <pre>
       * Snapshot root
       * </pre>
       *
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeOrBuilder getRootOrBuilder() {
        return root_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance() : root_;
      }

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        if (hasRoot()) {
          if (!getRoot().isInitialized()) {
            memoizedIsInitialized = 0;
            return false;
          }
        }
        memoizedIsInitialized = 1;
        return true;
      }

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        if (((bitField0_ & 0x00000001) != 0)) {
          output.writeUInt32(1, snapshotId_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          output.writeMessage(2, getRoot());
        }
        getUnknownFields().writeTo(output);
      }

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt32Size(1, snapshotId_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeMessageSize(2, getRoot());
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
         return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot) obj;

        if (hasSnapshotId() != other.hasSnapshotId()) return false;
        if (hasSnapshotId()) {
          if (getSnapshotId()
              != other.getSnapshotId()) return false;
        }
        if (hasRoot() != other.hasRoot()) return false;
        if (hasRoot()) {
          if (!getRoot()
              .equals(other.getRoot())) return false;
        }
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (hasSnapshotId()) {
          hash = (37 * hash) + SNAPSHOTID_FIELD_NUMBER;
          hash = (53 * hash) + getSnapshotId();
        }
        if (hasRoot()) {
          hash = (37 * hash) + ROOT_FIELD_NUMBER;
          hash = (53 * hash) + getRoot().hashCode();
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotSection.Snapshot}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.SnapshotSection.Snapshot)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.SnapshotOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot.newBuilder()
        private Builder() {
          maybeForceBuilderInitialization();
        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);
          maybeForceBuilderInitialization();
        }
        private void maybeForceBuilderInitialization() {
          if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                  .alwaysUseFieldBuilders) {
            getRootFieldBuilder();
          }
        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          snapshotId_ = 0;
          root_ = null;
          if (rootBuilder_ != null) {
            rootBuilder_.dispose();
            rootBuilder_ = null;
          }
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot(this);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot result) {
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) != 0)) {
            result.snapshotId_ = snapshotId_;
            to_bitField0_ |= 0x00000001;
          }
          if (((from_bitField0_ & 0x00000002) != 0)) {
            result.root_ = rootBuilder_ == null
                ? root_
                : rootBuilder_.build();
            to_bitField0_ |= 0x00000002;
          }
          result.bitField0_ |= to_bitField0_;
        }

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot.getDefaultInstance()) return this;
          if (other.hasSnapshotId()) {
            setSnapshotId(other.getSnapshotId());
          }
          if (other.hasRoot()) {
            mergeRoot(other.getRoot());
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }

        @java.lang.Override
        public final boolean isInitialized() {
          if (hasRoot()) {
            if (!getRoot().isInitialized()) {
              return false;
            }
          }
          return true;
        }

        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 8: {
                  snapshotId_ = input.readUInt32();
                  bitField0_ |= 0x00000001;
                  break;
                } // case 8
                case 18: {
                  input.readMessage(
                      getRootFieldBuilder().getBuilder(),
                      extensionRegistry);
                  bitField0_ |= 0x00000002;
                  break;
                } // case 18
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
        private int bitField0_;

        private int snapshotId_ ;
        /**
         * <code>optional uint32 snapshotId = 1;</code>
         * @return Whether the snapshotId field is set.
         */
        @java.lang.Override
        public boolean hasSnapshotId() {
          return ((bitField0_ & 0x00000001) != 0);
        }
        /**
         * <code>optional uint32 snapshotId = 1;</code>
         * @return The snapshotId.
         */
        @java.lang.Override
        public int getSnapshotId() {
          return snapshotId_;
        }
        /**
         * <code>optional uint32 snapshotId = 1;</code>
         * @param value The snapshotId to set.
         * @return This builder for chaining.
         */
        public Builder setSnapshotId(int value) {

          snapshotId_ = value;
          bitField0_ |= 0x00000001;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint32 snapshotId = 1;</code>
         * @return This builder for chaining.
         */
        public Builder clearSnapshotId() {
          bitField0_ = (bitField0_ & ~0x00000001);
          snapshotId_ = 0;
          onChanged();
          return this;
        }

        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode root_;
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeOrBuilder> rootBuilder_;
        /**
         * <pre>
         * Snapshot root
         * </pre>
         *
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
         * @return Whether the root field is set.
         */
        public boolean hasRoot() {
          return ((bitField0_ & 0x00000002) != 0);
        }
        /**
         * <pre>
         * Snapshot root
         * </pre>
         *
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
         * @return The root.
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode getRoot() {
          if (rootBuilder_ == null) {
            return root_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance() : root_;
          } else {
            return rootBuilder_.getMessage();
          }
        }
        /**
         * <pre>
         * Snapshot root
         * </pre>
         *
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
         */
        public Builder setRoot(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode value) {
          if (rootBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            root_ = value;
          } else {
            rootBuilder_.setMessage(value);
          }
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        /**
         * <pre>
         * Snapshot root
         * </pre>
         *
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
         */
        public Builder setRoot(
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder builderForValue) {
          if (rootBuilder_ == null) {
            root_ = builderForValue.build();
          } else {
            rootBuilder_.setMessage(builderForValue.build());
          }
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        /**
         * <pre>
         * Snapshot root
         * </pre>
         *
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
         */
        public Builder mergeRoot(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode value) {
          if (rootBuilder_ == null) {
            if (((bitField0_ & 0x00000002) != 0) &&
              root_ != null &&
              root_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance()) {
              getRootBuilder().mergeFrom(value);
            } else {
              root_ = value;
            }
          } else {
            rootBuilder_.mergeFrom(value);
          }
          if (root_ != null) {
            bitField0_ |= 0x00000002;
            onChanged();
          }
          return this;
        }
        /**
         * <pre>
         * Snapshot root
         * </pre>
         *
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
         */
        public Builder clearRoot() {
          bitField0_ = (bitField0_ & ~0x00000002);
          root_ = null;
          if (rootBuilder_ != null) {
            rootBuilder_.dispose();
            rootBuilder_ = null;
          }
          onChanged();
          return this;
        }
        /**
         * <pre>
         * Snapshot root
         * </pre>
         *
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder getRootBuilder() {
          bitField0_ |= 0x00000002;
          onChanged();
          return getRootFieldBuilder().getBuilder();
        }
        /**
         * <pre>
         * Snapshot root
         * </pre>
         *
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeOrBuilder getRootOrBuilder() {
          if (rootBuilder_ != null) {
            return rootBuilder_.getMessageOrBuilder();
          } else {
            return root_ == null ?
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.getDefaultInstance() : root_;
          }
        }
        /**
         * <pre>
         * Snapshot root
         * </pre>
         *
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INode root = 2;</code>
         */
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeOrBuilder> 
            getRootFieldBuilder() {
          if (rootBuilder_ == null) {
            rootBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeOrBuilder>(
                    getRoot(),
                    getParentForChildren(),
                    isClean());
            root_ = null;
          }
          return rootBuilder_;
        }
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SnapshotSection.Snapshot)
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SnapshotSection.Snapshot)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<Snapshot>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<Snapshot>() {
        @java.lang.Override
        public Snapshot parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<Snapshot> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<Snapshot> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Snapshot getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }
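    /*
     * Usage sketch (illustrative comment, not part of the generated code):
     * Snapshot instances are assembled through the Builder above. A minimal
     * example with made-up values; "root" is optional and is left unset here:
     *
     *   FsImageProto.SnapshotSection.Snapshot snapshot =
     *       FsImageProto.SnapshotSection.Snapshot.newBuilder()
     *           .setSnapshotId(42)
     *           .build();
     *
     * build() only throws an uninitialized-message exception if a root INode
     * was set but is itself missing required fields (see isInitialized()).
     */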

    private int bitField0_;
    public static final int SNAPSHOTCOUNTER_FIELD_NUMBER = 1;
    private int snapshotCounter_ = 0;
    /**
     * <code>optional uint32 snapshotCounter = 1;</code>
     * @return Whether the snapshotCounter field is set.
     */
    @java.lang.Override
    public boolean hasSnapshotCounter() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional uint32 snapshotCounter = 1;</code>
     * @return The snapshotCounter.
     */
    @java.lang.Override
    public int getSnapshotCounter() {
      return snapshotCounter_;
    }

    public static final int SNAPSHOTTABLEDIR_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.Internal.LongList snapshottableDir_ =
        emptyLongList();
    /**
     * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
     * @return A list containing the snapshottableDir.
     */
    @java.lang.Override
    public java.util.List<java.lang.Long>
        getSnapshottableDirList() {
      return snapshottableDir_;
    }
    /**
     * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
     * @return The count of snapshottableDir.
     */
    public int getSnapshottableDirCount() {
      return snapshottableDir_.size();
    }
    /**
     * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
     * @param index The index of the element to return.
     * @return The snapshottableDir at the given index.
     */
    public long getSnapshottableDir(int index) {
      return snapshottableDir_.getLong(index);
    }
    private int snapshottableDirMemoizedSerializedSize = -1;

    public static final int NUMSNAPSHOTS_FIELD_NUMBER = 3;
    private int numSnapshots_ = 0;
    /**
     * <pre>
     * total number of snapshots
     * </pre>
     *
     * <code>optional uint32 numSnapshots = 3;</code>
     * @return Whether the numSnapshots field is set.
     */
    @java.lang.Override
    public boolean hasNumSnapshots() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <pre>
     * total number of snapshots
     * </pre>
     *
     * <code>optional uint32 numSnapshots = 3;</code>
     * @return The numSnapshots.
     */
    @java.lang.Override
    public int getNumSnapshots() {
      return numSnapshots_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt32(1, snapshotCounter_);
      }
      if (getSnapshottableDirList().size() > 0) {
        output.writeUInt32NoTag(18);
        output.writeUInt32NoTag(snapshottableDirMemoizedSerializedSize);
      }
      for (int i = 0; i < snapshottableDir_.size(); i++) {
        output.writeUInt64NoTag(snapshottableDir_.getLong(i));
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt32(3, numSnapshots_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(1, snapshotCounter_);
      }
      {
        int dataSize = 0;
        for (int i = 0; i < snapshottableDir_.size(); i++) {
          dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64SizeNoTag(snapshottableDir_.getLong(i));
        }
        size += dataSize;
        if (!getSnapshottableDirList().isEmpty()) {
          size += 1;
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
              .computeInt32SizeNoTag(dataSize);
        }
        snapshottableDirMemoizedSerializedSize = dataSize;
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(3, numSnapshots_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection) obj;

      if (hasSnapshotCounter() != other.hasSnapshotCounter()) return false;
      if (hasSnapshotCounter()) {
        if (getSnapshotCounter()
            != other.getSnapshotCounter()) return false;
      }
      if (!getSnapshottableDirList()
          .equals(other.getSnapshottableDirList())) return false;
      if (hasNumSnapshots() != other.hasNumSnapshots()) return false;
      if (hasNumSnapshots()) {
        if (getNumSnapshots()
            != other.getNumSnapshots()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasSnapshotCounter()) {
        hash = (37 * hash) + SNAPSHOTCOUNTER_FIELD_NUMBER;
        hash = (53 * hash) + getSnapshotCounter();
      }
      if (getSnapshottableDirCount() > 0) {
        hash = (37 * hash) + SNAPSHOTTABLEDIR_FIELD_NUMBER;
        hash = (53 * hash) + getSnapshottableDirList().hashCode();
      }
      if (hasNumSnapshots()) {
        hash = (37 * hash) + NUMSNAPSHOTS_FIELD_NUMBER;
        hash = (53 * hash) + getNumSnapshots();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * This section records the information about snapshots
     * NAME: SNAPSHOT
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotSection}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.SnapshotSection)
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSectionOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        snapshotCounter_ = 0;
        snapshottableDir_ = emptyLongList();
        numSnapshots_ = 0;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotSection_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection build() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection buildPartial() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.snapshotCounter_ = snapshotCounter_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          snapshottableDir_.makeImmutable();
          result.snapshottableDir_ = snapshottableDir_;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.numSnapshots_ = numSnapshots_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection) {
          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection other) {
        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection.getDefaultInstance()) return this;
        if (other.hasSnapshotCounter()) {
          setSnapshotCounter(other.getSnapshotCounter());
        }
        if (!other.snapshottableDir_.isEmpty()) {
          if (snapshottableDir_.isEmpty()) {
            snapshottableDir_ = other.snapshottableDir_;
            snapshottableDir_.makeImmutable();
            bitField0_ |= 0x00000002;
          } else {
            ensureSnapshottableDirIsMutable();
            snapshottableDir_.addAll(other.snapshottableDir_);
          }
          onChanged();
        }
        if (other.hasNumSnapshots()) {
          setNumSnapshots(other.getNumSnapshots());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                snapshotCounter_ = input.readUInt32();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 16: {
                long v = input.readUInt64();
                ensureSnapshottableDirIsMutable();
                snapshottableDir_.addLong(v);
                break;
              } // case 16
              case 18: {
                int length = input.readRawVarint32();
                int limit = input.pushLimit(length);
                ensureSnapshottableDirIsMutable();
                while (input.getBytesUntilLimit() > 0) {
                  snapshottableDir_.addLong(input.readUInt64());
                }
                input.popLimit(limit);
                break;
              } // case 18
              case 24: {
                numSnapshots_ = input.readUInt32();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int snapshotCounter_ ;
      /**
       * <code>optional uint32 snapshotCounter = 1;</code>
       * @return Whether the snapshotCounter field is set.
       */
      @java.lang.Override
      public boolean hasSnapshotCounter() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional uint32 snapshotCounter = 1;</code>
       * @return The snapshotCounter.
       */
      @java.lang.Override
      public int getSnapshotCounter() {
        return snapshotCounter_;
      }
      /**
       * <code>optional uint32 snapshotCounter = 1;</code>
       * @param value The snapshotCounter to set.
       * @return This builder for chaining.
       */
      public Builder setSnapshotCounter(int value) {

        snapshotCounter_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint32 snapshotCounter = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearSnapshotCounter() {
        bitField0_ = (bitField0_ & ~0x00000001);
        snapshotCounter_ = 0;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.Internal.LongList snapshottableDir_ = emptyLongList();
      private void ensureSnapshottableDirIsMutable() {
        if (!snapshottableDir_.isModifiable()) {
          snapshottableDir_ = makeMutableCopy(snapshottableDir_);
        }
        bitField0_ |= 0x00000002;
      }
      /**
       * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
       * @return A list containing the snapshottableDir.
       */
      public java.util.List<java.lang.Long>
          getSnapshottableDirList() {
        snapshottableDir_.makeImmutable();
        return snapshottableDir_;
      }
      /**
       * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
       * @return The count of snapshottableDir.
       */
      public int getSnapshottableDirCount() {
        return snapshottableDir_.size();
      }
      /**
       * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
       * @param index The index of the element to return.
       * @return The snapshottableDir at the given index.
       */
      public long getSnapshottableDir(int index) {
        return snapshottableDir_.getLong(index);
      }
      /**
       * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
       * @param index The index to set the value at.
       * @param value The snapshottableDir to set.
       * @return This builder for chaining.
       */
      public Builder setSnapshottableDir(
          int index, long value) {

        ensureSnapshottableDirIsMutable();
        snapshottableDir_.setLong(index, value);
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
       * @param value The snapshottableDir to add.
       * @return This builder for chaining.
       */
      public Builder addSnapshottableDir(long value) {

        ensureSnapshottableDirIsMutable();
        snapshottableDir_.addLong(value);
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
       * @param values The snapshottableDir to add.
       * @return This builder for chaining.
       */
      public Builder addAllSnapshottableDir(
          java.lang.Iterable<? extends java.lang.Long> values) {
        ensureSnapshottableDirIsMutable();
        org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
            values, snapshottableDir_);
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 snapshottableDir = 2 [packed = true];</code>
       * @return This builder for chaining.
       */
      public Builder clearSnapshottableDir() {
        snapshottableDir_ = emptyLongList();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }

      private int numSnapshots_ ;
      /**
       * <pre>
       * total number of snapshots
       * </pre>
       *
       * <code>optional uint32 numSnapshots = 3;</code>
       * @return Whether the numSnapshots field is set.
       */
      @java.lang.Override
      public boolean hasNumSnapshots() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <pre>
       * total number of snapshots
       * </pre>
       *
       * <code>optional uint32 numSnapshots = 3;</code>
       * @return The numSnapshots.
       */
      @java.lang.Override
      public int getNumSnapshots() {
        return numSnapshots_;
      }
      /**
       * <pre>
       * total number of snapshots
       * </pre>
       *
       * <code>optional uint32 numSnapshots = 3;</code>
       * @param value The numSnapshots to set.
       * @return This builder for chaining.
       */
      public Builder setNumSnapshots(int value) {

        numSnapshots_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * total number of snapshots
       * </pre>
       *
       * <code>optional uint32 numSnapshots = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearNumSnapshots() {
        bitField0_ = (bitField0_ & ~0x00000004);
        numSnapshots_ = 0;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SnapshotSection)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SnapshotSection)
    private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection();
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotSection>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<SnapshotSection>() {
      @java.lang.Override
      public SnapshotSection parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotSection> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotSection> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
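
  /*
   * Minimal usage sketch for the SnapshotSection builder above. The values are
   * illustrative only, and the entry points assumed here (newBuilder()/build())
   * are the standard ones emitted for every generated message:
   *
   *   FsImageProto.SnapshotSection section = FsImageProto.SnapshotSection.newBuilder()
   *       .setSnapshotCounter(5)          // next snapshot id to hand out
   *       .addSnapshottableDir(16386L)    // inode id of a snapshottable directory
   *       .setNumSnapshots(3)             // total number of snapshots in the image
   *       .build();
   */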

  public interface SnapshotDiffSectionOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.SnapshotDiffSection)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {
  }
  /**
   * <pre>
   **
   * This section records information about snapshot diffs
   * NAME: SNAPSHOT_DIFF
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection}
   */
  public static final class SnapshotDiffSection extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.SnapshotDiffSection)
      SnapshotDiffSectionOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use SnapshotDiffSection.newBuilder() to construct.
    private SnapshotDiffSection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private SnapshotDiffSection() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new SnapshotDiffSection();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.Builder.class);
    }

    public interface CreatedListEntryOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.SnapshotDiffSection.CreatedListEntry)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <code>optional bytes name = 1;</code>
       * @return Whether the name field is set.
       */
      boolean hasName();
      /**
       * <code>optional bytes name = 1;</code>
       * @return The name.
       */
      org.apache.hadoop.thirdparty.protobuf.ByteString getName();
    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.CreatedListEntry}
     */
    public static final class CreatedListEntry extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.SnapshotDiffSection.CreatedListEntry)
        CreatedListEntryOrBuilder {
    private static final long serialVersionUID = 0L;
      // Use CreatedListEntry.newBuilder() to construct.
      private CreatedListEntry(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private CreatedListEntry() {
        name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new CreatedListEntry();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry.Builder.class);
      }

      private int bitField0_;
      public static final int NAME_FIELD_NUMBER = 1;
      private org.apache.hadoop.thirdparty.protobuf.ByteString name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>optional bytes name = 1;</code>
       * @return Whether the name field is set.
       */
      @java.lang.Override
      public boolean hasName() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional bytes name = 1;</code>
       * @return The name.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getName() {
        return name_;
      }

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        memoizedIsInitialized = 1;
        return true;
      }

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        if (((bitField0_ & 0x00000001) != 0)) {
          output.writeBytes(1, name_);
        }
        getUnknownFields().writeTo(output);
      }

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeBytesSize(1, name_);
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
          return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry) obj;

        if (hasName() != other.hasName()) return false;
        if (hasName()) {
          if (!getName()
              .equals(other.getName())) return false;
        }
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (hasName()) {
          hash = (37 * hash) + NAME_FIELD_NUMBER;
          hash = (53 * hash) + getName().hashCode();
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.CreatedListEntry}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.SnapshotDiffSection.CreatedListEntry)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntryOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry.newBuilder()
        private Builder() {

        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);

        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry(this);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry result) {
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) != 0)) {
            result.name_ = name_;
            to_bitField0_ |= 0x00000001;
          }
          result.bitField0_ |= to_bitField0_;
        }

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry.getDefaultInstance()) return this;
          if (other.hasName()) {
            setName(other.getName());
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }

        @java.lang.Override
        public final boolean isInitialized() {
          return true;
        }

        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 10: {
                  name_ = input.readBytes();
                  bitField0_ |= 0x00000001;
                  break;
                } // case 10
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
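
        // In the parse loop above, tag 10 corresponds to field 1 ("name") with
        // wire type 2 (length-delimited): (1 << 3) | 2 == 10. Any other tag is
        // routed to parseUnknownField, which also detects end-group tags.
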
        private int bitField0_;

        private org.apache.hadoop.thirdparty.protobuf.ByteString name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        /**
         * <code>optional bytes name = 1;</code>
         * @return Whether the name field is set.
         */
        @java.lang.Override
        public boolean hasName() {
          return ((bitField0_ & 0x00000001) != 0);
        }
        /**
         * <code>optional bytes name = 1;</code>
         * @return The name.
         */
        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.ByteString getName() {
          return name_;
        }
        /**
         * <code>optional bytes name = 1;</code>
         * @param value The name to set.
         * @return This builder for chaining.
         */
        public Builder setName(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
          if (value == null) { throw new NullPointerException(); }
          name_ = value;
          bitField0_ |= 0x00000001;
          onChanged();
          return this;
        }
        /**
         * <code>optional bytes name = 1;</code>
         * @return This builder for chaining.
         */
        public Builder clearName() {
          bitField0_ = (bitField0_ & ~0x00000001);
          name_ = getDefaultInstance().getName();
          onChanged();
          return this;
        }
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.CreatedListEntry)
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.CreatedListEntry)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<CreatedListEntry>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<CreatedListEntry>() {
        @java.lang.Override
        public CreatedListEntry parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<CreatedListEntry> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<CreatedListEntry> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }
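
    /*
     * Minimal round-trip sketch for the CreatedListEntry message above. Values are
     * illustrative; the calls used are the generated accessors defined in this class
     * plus the standard toByteArray()/parseFrom() pair every message provides:
     *
     *   FsImageProto.SnapshotDiffSection.CreatedListEntry entry =
     *       FsImageProto.SnapshotDiffSection.CreatedListEntry.newBuilder()
     *           .setName(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("file.txt"))
     *           .build();
     *   byte[] bytes = entry.toByteArray();
     *   FsImageProto.SnapshotDiffSection.CreatedListEntry parsed =
     *       FsImageProto.SnapshotDiffSection.CreatedListEntry.parseFrom(bytes);
     *   // parsed.hasName() is true and parsed.getName() equals entry.getName()
     */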

    public interface DirectoryDiffOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.SnapshotDiffSection.DirectoryDiff)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <code>optional uint32 snapshotId = 1;</code>
       * @return Whether the snapshotId field is set.
       */
      boolean hasSnapshotId();
      /**
       * <code>optional uint32 snapshotId = 1;</code>
       * @return The snapshotId.
       */
      int getSnapshotId();

      /**
       * <code>optional uint32 childrenSize = 2;</code>
       * @return Whether the childrenSize field is set.
       */
      boolean hasChildrenSize();
      /**
       * <code>optional uint32 childrenSize = 2;</code>
       * @return The childrenSize.
       */
      int getChildrenSize();

      /**
       * <code>optional bool isSnapshotRoot = 3;</code>
       * @return Whether the isSnapshotRoot field is set.
       */
      boolean hasIsSnapshotRoot();
      /**
       * <code>optional bool isSnapshotRoot = 3;</code>
       * @return The isSnapshotRoot.
       */
      boolean getIsSnapshotRoot();

      /**
       * <code>optional bytes name = 4;</code>
       * @return Whether the name field is set.
       */
      boolean hasName();
      /**
       * <code>optional bytes name = 4;</code>
       * @return The name.
       */
      org.apache.hadoop.thirdparty.protobuf.ByteString getName();

      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
       * @return Whether the snapshotCopy field is set.
       */
      boolean hasSnapshotCopy();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
       * @return The snapshotCopy.
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getSnapshotCopy();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder getSnapshotCopyOrBuilder();

      /**
       * <code>optional uint32 createdListSize = 6;</code>
       * @return Whether the createdListSize field is set.
       */
      boolean hasCreatedListSize();
      /**
       * <code>optional uint32 createdListSize = 6;</code>
       * @return The createdListSize.
       */
      int getCreatedListSize();

      /**
       * <pre>
       * id of deleted inodes
       * </pre>
       *
       * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
       * @return A list containing the deletedINode.
       */
      java.util.List<java.lang.Long> getDeletedINodeList();
      /**
       * <pre>
       * id of deleted inodes
       * </pre>
       *
       * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
       * @return The count of deletedINode.
       */
      int getDeletedINodeCount();
      /**
       * <pre>
       * id of deleted inodes
       * </pre>
       *
       * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
       * @param index The index of the element to return.
       * @return The deletedINode at the given index.
       */
      long getDeletedINode(int index);

      /**
       * <pre>
       * id of reference nodes in the deleted list
       * </pre>
       *
       * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
       * @return A list containing the deletedINodeRef.
       */
      java.util.List<java.lang.Integer> getDeletedINodeRefList();
      /**
       * <pre>
       * id of reference nodes in the deleted list
       * </pre>
       *
       * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
       * @return The count of deletedINodeRef.
       */
      int getDeletedINodeRefCount();
      /**
       * <pre>
       * id of reference nodes in the deleted list
       * </pre>
       *
       * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
       * @param index The index of the element to return.
       * @return The deletedINodeRef at the given index.
       */
      int getDeletedINodeRef(int index);
    }
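
    /*
     * Reading a DirectoryDiff through the accessors declared above (illustrative only;
     * "diff" stands for any DirectoryDiff obtained from a loaded fsimage):
     *
     *   int snapshotId = diff.getSnapshotId();
     *   for (int i = 0; i < diff.getDeletedINodeCount(); i++) {
     *     long deletedInodeId = diff.getDeletedINode(i);    // inode deleted in this diff
     *   }
     *   for (int i = 0; i < diff.getDeletedINodeRefCount(); i++) {
     *     int refId = diff.getDeletedINodeRef(i);           // reference node in the deleted list
     *   }
     */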
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.DirectoryDiff}
     */
    public static final class DirectoryDiff extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.SnapshotDiffSection.DirectoryDiff)
        DirectoryDiffOrBuilder {
    private static final long serialVersionUID = 0L;
      // Use DirectoryDiff.newBuilder() to construct.
      private DirectoryDiff(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private DirectoryDiff() {
        name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        deletedINode_ = emptyLongList();
        deletedINodeRef_ = emptyIntList();
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new DirectoryDiff();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff.Builder.class);
      }

      private int bitField0_;
      public static final int SNAPSHOTID_FIELD_NUMBER = 1;
      private int snapshotId_ = 0;
      /**
       * <code>optional uint32 snapshotId = 1;</code>
       * @return Whether the snapshotId field is set.
       */
      @java.lang.Override
      public boolean hasSnapshotId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional uint32 snapshotId = 1;</code>
       * @return The snapshotId.
       */
      @java.lang.Override
      public int getSnapshotId() {
        return snapshotId_;
      }

      public static final int CHILDRENSIZE_FIELD_NUMBER = 2;
      private int childrenSize_ = 0;
      /**
       * <code>optional uint32 childrenSize = 2;</code>
       * @return Whether the childrenSize field is set.
       */
      @java.lang.Override
      public boolean hasChildrenSize() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional uint32 childrenSize = 2;</code>
       * @return The childrenSize.
       */
      @java.lang.Override
      public int getChildrenSize() {
        return childrenSize_;
      }

      public static final int ISSNAPSHOTROOT_FIELD_NUMBER = 3;
      private boolean isSnapshotRoot_ = false;
      /**
       * <code>optional bool isSnapshotRoot = 3;</code>
       * @return Whether the isSnapshotRoot field is set.
       */
      @java.lang.Override
      public boolean hasIsSnapshotRoot() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional bool isSnapshotRoot = 3;</code>
       * @return The isSnapshotRoot.
       */
      @java.lang.Override
      public boolean getIsSnapshotRoot() {
        return isSnapshotRoot_;
      }

      public static final int NAME_FIELD_NUMBER = 4;
      private org.apache.hadoop.thirdparty.protobuf.ByteString name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>optional bytes name = 4;</code>
       * @return Whether the name field is set.
       */
      @java.lang.Override
      public boolean hasName() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional bytes name = 4;</code>
       * @return The name.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getName() {
        return name_;
      }

      public static final int SNAPSHOTCOPY_FIELD_NUMBER = 5;
      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory snapshotCopy_;
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
       * @return Whether the snapshotCopy field is set.
       */
      @java.lang.Override
      public boolean hasSnapshotCopy() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
       * @return The snapshotCopy.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getSnapshotCopy() {
        return snapshotCopy_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance() : snapshotCopy_;
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder getSnapshotCopyOrBuilder() {
        return snapshotCopy_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance() : snapshotCopy_;
      }

      public static final int CREATEDLISTSIZE_FIELD_NUMBER = 6;
      private int createdListSize_ = 0;
      /**
       * <code>optional uint32 createdListSize = 6;</code>
       * @return Whether the createdListSize field is set.
       */
      @java.lang.Override
      public boolean hasCreatedListSize() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>optional uint32 createdListSize = 6;</code>
       * @return The createdListSize.
       */
      @java.lang.Override
      public int getCreatedListSize() {
        return createdListSize_;
      }

      public static final int DELETEDINODE_FIELD_NUMBER = 7;
      @SuppressWarnings("serial")
      private org.apache.hadoop.thirdparty.protobuf.Internal.LongList deletedINode_ =
          emptyLongList();
      /**
       * <pre>
       * id of deleted inodes
       * </pre>
       *
       * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
       * @return A list containing the deletedINode.
       */
      @java.lang.Override
      public java.util.List<java.lang.Long>
          getDeletedINodeList() {
        return deletedINode_;
      }
      /**
       * <pre>
       * id of deleted inodes
       * </pre>
       *
       * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
       * @return The count of deletedINode.
       */
      public int getDeletedINodeCount() {
        return deletedINode_.size();
      }
      /**
       * <pre>
       * id of deleted inodes
       * </pre>
       *
       * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
       * @param index The index of the element to return.
       * @return The deletedINode at the given index.
       */
      public long getDeletedINode(int index) {
        return deletedINode_.getLong(index);
      }
      private int deletedINodeMemoizedSerializedSize = -1;

      public static final int DELETEDINODEREF_FIELD_NUMBER = 8;
      @SuppressWarnings("serial")
      private org.apache.hadoop.thirdparty.protobuf.Internal.IntList deletedINodeRef_ =
          emptyIntList();
      /**
       * <pre>
       * id of reference nodes in the deleted list
       * </pre>
       *
       * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
       * @return A list containing the deletedINodeRef.
       */
      @java.lang.Override
      public java.util.List<java.lang.Integer>
          getDeletedINodeRefList() {
        return deletedINodeRef_;
      }
      /**
       * <pre>
       * id of reference nodes in the deleted list
       * </pre>
       *
       * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
       * @return The count of deletedINodeRef.
       */
      public int getDeletedINodeRefCount() {
        return deletedINodeRef_.size();
      }
      /**
       * <pre>
       * id of reference nodes in the deleted list
       * </pre>
       *
       * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
       * @param index The index of the element to return.
       * @return The deletedINodeRef at the given index.
       */
      public int getDeletedINodeRef(int index) {
        return deletedINodeRef_.getInt(index);
      }
      private int deletedINodeRefMemoizedSerializedSize = -1;

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        if (hasSnapshotCopy()) {
          if (!getSnapshotCopy().isInitialized()) {
            memoizedIsInitialized = 0;
            return false;
          }
        }
        memoizedIsInitialized = 1;
        return true;
      }

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        getSerializedSize();
        if (((bitField0_ & 0x00000001) != 0)) {
          output.writeUInt32(1, snapshotId_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          output.writeUInt32(2, childrenSize_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          output.writeBool(3, isSnapshotRoot_);
        }
        if (((bitField0_ & 0x00000008) != 0)) {
          output.writeBytes(4, name_);
        }
        if (((bitField0_ & 0x00000010) != 0)) {
          output.writeMessage(5, getSnapshotCopy());
        }
        if (((bitField0_ & 0x00000020) != 0)) {
          output.writeUInt32(6, createdListSize_);
        }
        if (getDeletedINodeList().size() > 0) {
          output.writeUInt32NoTag(58);
          output.writeUInt32NoTag(deletedINodeMemoizedSerializedSize);
        }
        for (int i = 0; i < deletedINode_.size(); i++) {
          output.writeUInt64NoTag(deletedINode_.getLong(i));
        }
        if (getDeletedINodeRefList().size() > 0) {
          output.writeUInt32NoTag(66);
          output.writeUInt32NoTag(deletedINodeRefMemoizedSerializedSize);
        }
        for (int i = 0; i < deletedINodeRef_.size(); i++) {
          output.writeUInt32NoTag(deletedINodeRef_.getInt(i));
        }
        getUnknownFields().writeTo(output);
      }
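
      // Note on the packed fields above: deletedINode (field 7) and deletedINodeRef
      // (field 8) are each written as a single length-delimited record. The raw tag
      // bytes 58 and 66 are (7 << 3) | 2 and (8 << 3) | 2 respectively, and the length
      // prefix is the element byte count memoized by getSerializedSize(), which is why
      // writeTo() calls getSerializedSize() first.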

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt32Size(1, snapshotId_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt32Size(2, childrenSize_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeBoolSize(3, isSnapshotRoot_);
        }
        if (((bitField0_ & 0x00000008) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeBytesSize(4, name_);
        }
        if (((bitField0_ & 0x00000010) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeMessageSize(5, getSnapshotCopy());
        }
        if (((bitField0_ & 0x00000020) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt32Size(6, createdListSize_);
        }
        {
          int dataSize = 0;
          for (int i = 0; i < deletedINode_.size(); i++) {
            dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
              .computeUInt64SizeNoTag(deletedINode_.getLong(i));
          }
          size += dataSize;
          if (!getDeletedINodeList().isEmpty()) {
            size += 1;
            size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
                .computeInt32SizeNoTag(dataSize);
          }
          deletedINodeMemoizedSerializedSize = dataSize;
        }
        {
          int dataSize = 0;
          for (int i = 0; i < deletedINodeRef_.size(); i++) {
            dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
              .computeUInt32SizeNoTag(deletedINodeRef_.getInt(i));
          }
          size += dataSize;
          if (!getDeletedINodeRefList().isEmpty()) {
            size += 1;
            size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
                .computeInt32SizeNoTag(dataSize);
          }
          deletedINodeRefMemoizedSerializedSize = dataSize;
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
          return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff) obj;

        if (hasSnapshotId() != other.hasSnapshotId()) return false;
        if (hasSnapshotId()) {
          if (getSnapshotId()
              != other.getSnapshotId()) return false;
        }
        if (hasChildrenSize() != other.hasChildrenSize()) return false;
        if (hasChildrenSize()) {
          if (getChildrenSize()
              != other.getChildrenSize()) return false;
        }
        if (hasIsSnapshotRoot() != other.hasIsSnapshotRoot()) return false;
        if (hasIsSnapshotRoot()) {
          if (getIsSnapshotRoot()
              != other.getIsSnapshotRoot()) return false;
        }
        if (hasName() != other.hasName()) return false;
        if (hasName()) {
          if (!getName()
              .equals(other.getName())) return false;
        }
        if (hasSnapshotCopy() != other.hasSnapshotCopy()) return false;
        if (hasSnapshotCopy()) {
          if (!getSnapshotCopy()
              .equals(other.getSnapshotCopy())) return false;
        }
        if (hasCreatedListSize() != other.hasCreatedListSize()) return false;
        if (hasCreatedListSize()) {
          if (getCreatedListSize()
              != other.getCreatedListSize()) return false;
        }
        if (!getDeletedINodeList()
            .equals(other.getDeletedINodeList())) return false;
        if (!getDeletedINodeRefList()
            .equals(other.getDeletedINodeRefList())) return false;
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (hasSnapshotId()) {
          hash = (37 * hash) + SNAPSHOTID_FIELD_NUMBER;
          hash = (53 * hash) + getSnapshotId();
        }
        if (hasChildrenSize()) {
          hash = (37 * hash) + CHILDRENSIZE_FIELD_NUMBER;
          hash = (53 * hash) + getChildrenSize();
        }
        if (hasIsSnapshotRoot()) {
          hash = (37 * hash) + ISSNAPSHOTROOT_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
              getIsSnapshotRoot());
        }
        if (hasName()) {
          hash = (37 * hash) + NAME_FIELD_NUMBER;
          hash = (53 * hash) + getName().hashCode();
        }
        if (hasSnapshotCopy()) {
          hash = (37 * hash) + SNAPSHOTCOPY_FIELD_NUMBER;
          hash = (53 * hash) + getSnapshotCopy().hashCode();
        }
        if (hasCreatedListSize()) {
          hash = (37 * hash) + CREATEDLISTSIZE_FIELD_NUMBER;
          hash = (53 * hash) + getCreatedListSize();
        }
        if (getDeletedINodeCount() > 0) {
          hash = (37 * hash) + DELETEDINODE_FIELD_NUMBER;
          hash = (53 * hash) + getDeletedINodeList().hashCode();
        }
        if (getDeletedINodeRefCount() > 0) {
          hash = (37 * hash) + DELETEDINODEREF_FIELD_NUMBER;
          hash = (53 * hash) + getDeletedINodeRefList().hashCode();
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.DirectoryDiff}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.SnapshotDiffSection.DirectoryDiff)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiffOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff.newBuilder()
        private Builder() {
          maybeForceBuilderInitialization();
        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);
          maybeForceBuilderInitialization();
        }
        private void maybeForceBuilderInitialization() {
          if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                  .alwaysUseFieldBuilders) {
            getSnapshotCopyFieldBuilder();
          }
        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          snapshotId_ = 0;
          childrenSize_ = 0;
          isSnapshotRoot_ = false;
          name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
          snapshotCopy_ = null;
          if (snapshotCopyBuilder_ != null) {
            snapshotCopyBuilder_.dispose();
            snapshotCopyBuilder_ = null;
          }
          createdListSize_ = 0;
          deletedINode_ = emptyLongList();
          deletedINodeRef_ = emptyIntList();
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff(this);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff result) {
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) != 0)) {
            result.snapshotId_ = snapshotId_;
            to_bitField0_ |= 0x00000001;
          }
          if (((from_bitField0_ & 0x00000002) != 0)) {
            result.childrenSize_ = childrenSize_;
            to_bitField0_ |= 0x00000002;
          }
          if (((from_bitField0_ & 0x00000004) != 0)) {
            result.isSnapshotRoot_ = isSnapshotRoot_;
            to_bitField0_ |= 0x00000004;
          }
          if (((from_bitField0_ & 0x00000008) != 0)) {
            result.name_ = name_;
            to_bitField0_ |= 0x00000008;
          }
          if (((from_bitField0_ & 0x00000010) != 0)) {
            result.snapshotCopy_ = snapshotCopyBuilder_ == null
                ? snapshotCopy_
                : snapshotCopyBuilder_.build();
            to_bitField0_ |= 0x00000010;
          }
          if (((from_bitField0_ & 0x00000020) != 0)) {
            result.createdListSize_ = createdListSize_;
            to_bitField0_ |= 0x00000020;
          }
          if (((from_bitField0_ & 0x00000040) != 0)) {
            deletedINode_.makeImmutable();
            result.deletedINode_ = deletedINode_;
          }
          if (((from_bitField0_ & 0x00000080) != 0)) {
            deletedINodeRef_.makeImmutable();
            result.deletedINodeRef_ = deletedINodeRef_;
          }
          result.bitField0_ |= to_bitField0_;
        }

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff.getDefaultInstance()) return this;
          if (other.hasSnapshotId()) {
            setSnapshotId(other.getSnapshotId());
          }
          if (other.hasChildrenSize()) {
            setChildrenSize(other.getChildrenSize());
          }
          if (other.hasIsSnapshotRoot()) {
            setIsSnapshotRoot(other.getIsSnapshotRoot());
          }
          if (other.hasName()) {
            setName(other.getName());
          }
          if (other.hasSnapshotCopy()) {
            mergeSnapshotCopy(other.getSnapshotCopy());
          }
          if (other.hasCreatedListSize()) {
            setCreatedListSize(other.getCreatedListSize());
          }
          if (!other.deletedINode_.isEmpty()) {
            if (deletedINode_.isEmpty()) {
              deletedINode_ = other.deletedINode_;
              deletedINode_.makeImmutable();
              bitField0_ |= 0x00000040;
            } else {
              ensureDeletedINodeIsMutable();
              deletedINode_.addAll(other.deletedINode_);
            }
            onChanged();
          }
          if (!other.deletedINodeRef_.isEmpty()) {
            if (deletedINodeRef_.isEmpty()) {
              deletedINodeRef_ = other.deletedINodeRef_;
              deletedINodeRef_.makeImmutable();
              bitField0_ |= 0x00000080;
            } else {
              ensureDeletedINodeRefIsMutable();
              deletedINodeRef_.addAll(other.deletedINodeRef_);
            }
            onChanged();
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }
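        // Note (not generated): for the repeated deletedINode/deletedINodeRef fields,
        // mergeFrom(other) above adopts the other message's list directly (kept
        // immutable) when the local list is empty, and otherwise appends into a
        // mutable copy obtained through the ensure...IsMutable() helpers.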

        @java.lang.Override
        public final boolean isInitialized() {
          if (hasSnapshotCopy()) {
            if (!getSnapshotCopy().isInitialized()) {
              return false;
            }
          }
          return true;
        }

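        // Note (not generated): the wire-format parse loop below accepts both the
        // unpacked encoding (one varint per element, tags 56 and 64) and the packed
        // encoding (a single length-delimited run, tags 58 and 66) for the repeated
        // deletedINode and deletedINodeRef fields, as protobuf parsers must do
        // regardless of the [packed = true] option declared on the field.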
        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 8: {
                  snapshotId_ = input.readUInt32();
                  bitField0_ |= 0x00000001;
                  break;
                } // case 8
                case 16: {
                  childrenSize_ = input.readUInt32();
                  bitField0_ |= 0x00000002;
                  break;
                } // case 16
                case 24: {
                  isSnapshotRoot_ = input.readBool();
                  bitField0_ |= 0x00000004;
                  break;
                } // case 24
                case 34: {
                  name_ = input.readBytes();
                  bitField0_ |= 0x00000008;
                  break;
                } // case 34
                case 42: {
                  input.readMessage(
                      getSnapshotCopyFieldBuilder().getBuilder(),
                      extensionRegistry);
                  bitField0_ |= 0x00000010;
                  break;
                } // case 42
                case 48: {
                  createdListSize_ = input.readUInt32();
                  bitField0_ |= 0x00000020;
                  break;
                } // case 48
                case 56: {
                  long v = input.readUInt64();
                  ensureDeletedINodeIsMutable();
                  deletedINode_.addLong(v);
                  break;
                } // case 56
                case 58: {
                  int length = input.readRawVarint32();
                  int limit = input.pushLimit(length);
                  ensureDeletedINodeIsMutable();
                  while (input.getBytesUntilLimit() > 0) {
                    deletedINode_.addLong(input.readUInt64());
                  }
                  input.popLimit(limit);
                  break;
                } // case 58
                case 64: {
                  int v = input.readUInt32();
                  ensureDeletedINodeRefIsMutable();
                  deletedINodeRef_.addInt(v);
                  break;
                } // case 64
                case 66: {
                  int length = input.readRawVarint32();
                  int limit = input.pushLimit(length);
                  ensureDeletedINodeRefIsMutable();
                  while (input.getBytesUntilLimit() > 0) {
                    deletedINodeRef_.addInt(input.readUInt32());
                  }
                  input.popLimit(limit);
                  break;
                } // case 66
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
        private int bitField0_;

        private int snapshotId_ ;
        /**
         * <code>optional uint32 snapshotId = 1;</code>
         * @return Whether the snapshotId field is set.
         */
        @java.lang.Override
        public boolean hasSnapshotId() {
          return ((bitField0_ & 0x00000001) != 0);
        }
        /**
         * <code>optional uint32 snapshotId = 1;</code>
         * @return The snapshotId.
         */
        @java.lang.Override
        public int getSnapshotId() {
          return snapshotId_;
        }
        /**
         * <code>optional uint32 snapshotId = 1;</code>
         * @param value The snapshotId to set.
         * @return This builder for chaining.
         */
        public Builder setSnapshotId(int value) {

          snapshotId_ = value;
          bitField0_ |= 0x00000001;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint32 snapshotId = 1;</code>
         * @return This builder for chaining.
         */
        public Builder clearSnapshotId() {
          bitField0_ = (bitField0_ & ~0x00000001);
          snapshotId_ = 0;
          onChanged();
          return this;
        }

        private int childrenSize_ ;
        /**
         * <code>optional uint32 childrenSize = 2;</code>
         * @return Whether the childrenSize field is set.
         */
        @java.lang.Override
        public boolean hasChildrenSize() {
          return ((bitField0_ & 0x00000002) != 0);
        }
        /**
         * <code>optional uint32 childrenSize = 2;</code>
         * @return The childrenSize.
         */
        @java.lang.Override
        public int getChildrenSize() {
          return childrenSize_;
        }
        /**
         * <code>optional uint32 childrenSize = 2;</code>
         * @param value The childrenSize to set.
         * @return This builder for chaining.
         */
        public Builder setChildrenSize(int value) {

          childrenSize_ = value;
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint32 childrenSize = 2;</code>
         * @return This builder for chaining.
         */
        public Builder clearChildrenSize() {
          bitField0_ = (bitField0_ & ~0x00000002);
          childrenSize_ = 0;
          onChanged();
          return this;
        }

        private boolean isSnapshotRoot_ ;
        /**
         * <code>optional bool isSnapshotRoot = 3;</code>
         * @return Whether the isSnapshotRoot field is set.
         */
        @java.lang.Override
        public boolean hasIsSnapshotRoot() {
          return ((bitField0_ & 0x00000004) != 0);
        }
        /**
         * <code>optional bool isSnapshotRoot = 3;</code>
         * @return The isSnapshotRoot.
         */
        @java.lang.Override
        public boolean getIsSnapshotRoot() {
          return isSnapshotRoot_;
        }
        /**
         * <code>optional bool isSnapshotRoot = 3;</code>
         * @param value The isSnapshotRoot to set.
         * @return This builder for chaining.
         */
        public Builder setIsSnapshotRoot(boolean value) {

          isSnapshotRoot_ = value;
          bitField0_ |= 0x00000004;
          onChanged();
          return this;
        }
        /**
         * <code>optional bool isSnapshotRoot = 3;</code>
         * @return This builder for chaining.
         */
        public Builder clearIsSnapshotRoot() {
          bitField0_ = (bitField0_ & ~0x00000004);
          isSnapshotRoot_ = false;
          onChanged();
          return this;
        }

        private org.apache.hadoop.thirdparty.protobuf.ByteString name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        /**
         * <code>optional bytes name = 4;</code>
         * @return Whether the name field is set.
         */
        @java.lang.Override
        public boolean hasName() {
          return ((bitField0_ & 0x00000008) != 0);
        }
        /**
         * <code>optional bytes name = 4;</code>
         * @return The name.
         */
        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.ByteString getName() {
          return name_;
        }
        /**
         * <code>optional bytes name = 4;</code>
         * @param value The name to set.
         * @return This builder for chaining.
         */
        public Builder setName(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
          if (value == null) { throw new NullPointerException(); }
          name_ = value;
          bitField0_ |= 0x00000008;
          onChanged();
          return this;
        }
        /**
         * <code>optional bytes name = 4;</code>
         * @return This builder for chaining.
         */
        public Builder clearName() {
          bitField0_ = (bitField0_ & ~0x00000008);
          name_ = getDefaultInstance().getName();
          onChanged();
          return this;
        }

        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory snapshotCopy_;
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder> snapshotCopyBuilder_;
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
         * @return Whether the snapshotCopy field is set.
         */
        public boolean hasSnapshotCopy() {
          return ((bitField0_ & 0x00000010) != 0);
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
         * @return The snapshotCopy.
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory getSnapshotCopy() {
          if (snapshotCopyBuilder_ == null) {
            return snapshotCopy_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance() : snapshotCopy_;
          } else {
            return snapshotCopyBuilder_.getMessage();
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
         */
        public Builder setSnapshotCopy(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory value) {
          if (snapshotCopyBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            snapshotCopy_ = value;
          } else {
            snapshotCopyBuilder_.setMessage(value);
          }
          bitField0_ |= 0x00000010;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
         */
        public Builder setSnapshotCopy(
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder builderForValue) {
          if (snapshotCopyBuilder_ == null) {
            snapshotCopy_ = builderForValue.build();
          } else {
            snapshotCopyBuilder_.setMessage(builderForValue.build());
          }
          bitField0_ |= 0x00000010;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
         */
        public Builder mergeSnapshotCopy(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory value) {
          if (snapshotCopyBuilder_ == null) {
            if (((bitField0_ & 0x00000010) != 0) &&
              snapshotCopy_ != null &&
              snapshotCopy_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance()) {
              getSnapshotCopyBuilder().mergeFrom(value);
            } else {
              snapshotCopy_ = value;
            }
          } else {
            snapshotCopyBuilder_.mergeFrom(value);
          }
          if (snapshotCopy_ != null) {
            bitField0_ |= 0x00000010;
            onChanged();
          }
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
         */
        public Builder clearSnapshotCopy() {
          bitField0_ = (bitField0_ & ~0x00000010);
          snapshotCopy_ = null;
          if (snapshotCopyBuilder_ != null) {
            snapshotCopyBuilder_.dispose();
            snapshotCopyBuilder_ = null;
          }
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder getSnapshotCopyBuilder() {
          bitField0_ |= 0x00000010;
          onChanged();
          return getSnapshotCopyFieldBuilder().getBuilder();
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder getSnapshotCopyOrBuilder() {
          if (snapshotCopyBuilder_ != null) {
            return snapshotCopyBuilder_.getMessageOrBuilder();
          } else {
            return snapshotCopy_ == null ?
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.getDefaultInstance() : snapshotCopy_;
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeDirectory snapshotCopy = 5;</code>
         */
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder> 
            getSnapshotCopyFieldBuilder() {
          if (snapshotCopyBuilder_ == null) {
            snapshotCopyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectoryOrBuilder>(
                    getSnapshotCopy(),
                    getParentForChildren(),
                    isClean());
            snapshotCopy_ = null;
          }
          return snapshotCopyBuilder_;
        }
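        // Note (not generated): the accessor above creates the SingleFieldBuilderV3
        // lazily on first use and then nulls out snapshotCopy_, so subsequent reads
        // and writes of the nested INodeDirectory go through the field builder
        // rather than the plain field.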

        private int createdListSize_ ;
        /**
         * <code>optional uint32 createdListSize = 6;</code>
         * @return Whether the createdListSize field is set.
         */
        @java.lang.Override
        public boolean hasCreatedListSize() {
          return ((bitField0_ & 0x00000020) != 0);
        }
        /**
         * <code>optional uint32 createdListSize = 6;</code>
         * @return The createdListSize.
         */
        @java.lang.Override
        public int getCreatedListSize() {
          return createdListSize_;
        }
        /**
         * <code>optional uint32 createdListSize = 6;</code>
         * @param value The createdListSize to set.
         * @return This builder for chaining.
         */
        public Builder setCreatedListSize(int value) {

          createdListSize_ = value;
          bitField0_ |= 0x00000020;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint32 createdListSize = 6;</code>
         * @return This builder for chaining.
         */
        public Builder clearCreatedListSize() {
          bitField0_ = (bitField0_ & ~0x00000020);
          createdListSize_ = 0;
          onChanged();
          return this;
        }

        private org.apache.hadoop.thirdparty.protobuf.Internal.LongList deletedINode_ = emptyLongList();
        private void ensureDeletedINodeIsMutable() {
          if (!deletedINode_.isModifiable()) {
            deletedINode_ = makeMutableCopy(deletedINode_);
          }
          bitField0_ |= 0x00000040;
        }
        /**
         * <pre>
         * id of deleted inodes
         * </pre>
         *
         * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
         * @return A list containing the deletedINode.
         */
        public java.util.List<java.lang.Long>
            getDeletedINodeList() {
          deletedINode_.makeImmutable();
          return deletedINode_;
        }
        /**
         * <pre>
         * id of deleted inodes
         * </pre>
         *
         * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
         * @return The count of deletedINode.
         */
        public int getDeletedINodeCount() {
          return deletedINode_.size();
        }
        /**
         * <pre>
         * id of deleted inodes
         * </pre>
         *
         * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
         * @param index The index of the element to return.
         * @return The deletedINode at the given index.
         */
        public long getDeletedINode(int index) {
          return deletedINode_.getLong(index);
        }
        /**
         * <pre>
         * id of deleted inodes
         * </pre>
         *
         * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
         * @param index The index to set the value at.
         * @param value The deletedINode to set.
         * @return This builder for chaining.
         */
        public Builder setDeletedINode(
            int index, long value) {

          ensureDeletedINodeIsMutable();
          deletedINode_.setLong(index, value);
          bitField0_ |= 0x00000040;
          onChanged();
          return this;
        }
        /**
         * <pre>
         * id of deleted inodes
         * </pre>
         *
         * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
         * @param value The deletedINode to add.
         * @return This builder for chaining.
         */
        public Builder addDeletedINode(long value) {

          ensureDeletedINodeIsMutable();
          deletedINode_.addLong(value);
          bitField0_ |= 0x00000040;
          onChanged();
          return this;
        }
        /**
         * <pre>
         * id of deleted inodes
         * </pre>
         *
         * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
         * @param values The deletedINode to add.
         * @return This builder for chaining.
         */
        public Builder addAllDeletedINode(
            java.lang.Iterable<? extends java.lang.Long> values) {
          ensureDeletedINodeIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, deletedINode_);
          bitField0_ |= 0x00000040;
          onChanged();
          return this;
        }
        /**
         * <pre>
         * id of deleted inodes
         * </pre>
         *
         * <code>repeated uint64 deletedINode = 7 [packed = true];</code>
         * @return This builder for chaining.
         */
        public Builder clearDeletedINode() {
          deletedINode_ = emptyLongList();
          bitField0_ = (bitField0_ & ~0x00000040);
          onChanged();
          return this;
        }

        private org.apache.hadoop.thirdparty.protobuf.Internal.IntList deletedINodeRef_ = emptyIntList();
        private void ensureDeletedINodeRefIsMutable() {
          if (!deletedINodeRef_.isModifiable()) {
            deletedINodeRef_ = makeMutableCopy(deletedINodeRef_);
          }
          bitField0_ |= 0x00000080;
        }
        /**
         * <pre>
         * id of reference nodes in the deleted list
         * </pre>
         *
         * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
         * @return A list containing the deletedINodeRef.
         */
        public java.util.List<java.lang.Integer>
            getDeletedINodeRefList() {
          deletedINodeRef_.makeImmutable();
          return deletedINodeRef_;
        }
        /**
         * <pre>
         * id of reference nodes in the deleted list
         * </pre>
         *
         * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
         * @return The count of deletedINodeRef.
         */
        public int getDeletedINodeRefCount() {
          return deletedINodeRef_.size();
        }
        /**
         * <pre>
         * id of reference nodes in the deleted list
         * </pre>
         *
         * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
         * @param index The index of the element to return.
         * @return The deletedINodeRef at the given index.
         */
        public int getDeletedINodeRef(int index) {
          return deletedINodeRef_.getInt(index);
        }
        /**
         * <pre>
         * id of reference nodes in the deleted list
         * </pre>
         *
         * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
         * @param index The index to set the value at.
         * @param value The deletedINodeRef to set.
         * @return This builder for chaining.
         */
        public Builder setDeletedINodeRef(
            int index, int value) {

          ensureDeletedINodeRefIsMutable();
          deletedINodeRef_.setInt(index, value);
          bitField0_ |= 0x00000080;
          onChanged();
          return this;
        }
        /**
         * <pre>
         * id of reference nodes in the deleted list
         * </pre>
         *
         * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
         * @param value The deletedINodeRef to add.
         * @return This builder for chaining.
         */
        public Builder addDeletedINodeRef(int value) {

          ensureDeletedINodeRefIsMutable();
          deletedINodeRef_.addInt(value);
          bitField0_ |= 0x00000080;
          onChanged();
          return this;
        }
        /**
         * <pre>
         * id of reference nodes in the deleted list
         * </pre>
         *
         * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
         * @param values The deletedINodeRef to add.
         * @return This builder for chaining.
         */
        public Builder addAllDeletedINodeRef(
            java.lang.Iterable<? extends java.lang.Integer> values) {
          ensureDeletedINodeRefIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, deletedINodeRef_);
          bitField0_ |= 0x00000080;
          onChanged();
          return this;
        }
        /**
         * <pre>
         * id of reference nodes in the deleted list
         * </pre>
         *
         * <code>repeated uint32 deletedINodeRef = 8 [packed = true];</code>
         * @return This builder for chaining.
         */
        public Builder clearDeletedINodeRef() {
          deletedINodeRef_ = emptyIntList();
          bitField0_ = (bitField0_ & ~0x00000080);
          onChanged();
          return this;
        }
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.DirectoryDiff)
      }
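      // Illustrative sketch (not part of the generated code): constructing a
      // DirectoryDiff through the Builder above. The field values are hypothetical;
      // only the setter/adder names visible in this class are assumed.
      //
      //   SnapshotDiffSection.DirectoryDiff diff =
      //       SnapshotDiffSection.DirectoryDiff.newBuilder()
      //           .setSnapshotId(1)
      //           .setChildrenSize(3)
      //           .setIsSnapshotRoot(false)
      //           .setName(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("dir"))
      //           .addDeletedINode(16387L)
      //           .addDeletedINodeRef(0)
      //           .build();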

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.DirectoryDiff)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

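      // Note (not generated): direct use of the PARSER field below is deprecated in
      // generated code; callers are expected to go through parser() or the static
      // parseFrom/parseDelimitedFrom helpers instead.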
      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DirectoryDiff>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DirectoryDiff>() {
        @java.lang.Override
        public DirectoryDiff parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<DirectoryDiff> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<DirectoryDiff> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DirectoryDiff getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }
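    // Illustrative sketch (not part of the generated code): a length-delimited round
    // trip of a DirectoryDiff using the generated static helpers. The stream variables
    // are hypothetical placeholders.
    //
    //   diff.writeDelimitedTo(outputStream);   // diff: a built DirectoryDiff
    //   SnapshotDiffSection.DirectoryDiff parsed =
    //       SnapshotDiffSection.DirectoryDiff.parseDelimitedFrom(inputStream);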

    public interface FileDiffOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.SnapshotDiffSection.FileDiff)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <code>optional uint32 snapshotId = 1;</code>
       * @return Whether the snapshotId field is set.
       */
      boolean hasSnapshotId();
      /**
       * <code>optional uint32 snapshotId = 1;</code>
       * @return The snapshotId.
       */
      int getSnapshotId();

      /**
       * <code>optional uint64 fileSize = 2;</code>
       * @return Whether the fileSize field is set.
       */
      boolean hasFileSize();
      /**
       * <code>optional uint64 fileSize = 2;</code>
       * @return The fileSize.
       */
      long getFileSize();

      /**
       * <code>optional bytes name = 3;</code>
       * @return Whether the name field is set.
       */
      boolean hasName();
      /**
       * <code>optional bytes name = 3;</code>
       * @return The name.
       */
      org.apache.hadoop.thirdparty.protobuf.ByteString getName();

      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
       * @return Whether the snapshotCopy field is set.
       */
      boolean hasSnapshotCopy();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
       * @return The snapshotCopy.
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getSnapshotCopy();
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder getSnapshotCopyOrBuilder();

      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
       */
      java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> 
          getBlocksList();
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
       */
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index);
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
       */
      int getBlocksCount();
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
       */
      java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
          getBlocksOrBuilderList();
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
       */
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
          int index);
    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.FileDiff}
     */
    public static final class FileDiff extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.SnapshotDiffSection.FileDiff)
        FileDiffOrBuilder {
    private static final long serialVersionUID = 0L;
      // Use FileDiff.newBuilder() to construct.
      private FileDiff(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private FileDiff() {
        name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        blocks_ = java.util.Collections.emptyList();
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new FileDiff();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff.Builder.class);
      }

      private int bitField0_;
      public static final int SNAPSHOTID_FIELD_NUMBER = 1;
      private int snapshotId_ = 0;
      /**
       * <code>optional uint32 snapshotId = 1;</code>
       * @return Whether the snapshotId field is set.
       */
      @java.lang.Override
      public boolean hasSnapshotId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional uint32 snapshotId = 1;</code>
       * @return The snapshotId.
       */
      @java.lang.Override
      public int getSnapshotId() {
        return snapshotId_;
      }

      public static final int FILESIZE_FIELD_NUMBER = 2;
      private long fileSize_ = 0L;
      /**
       * <code>optional uint64 fileSize = 2;</code>
       * @return Whether the fileSize field is set.
       */
      @java.lang.Override
      public boolean hasFileSize() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional uint64 fileSize = 2;</code>
       * @return The fileSize.
       */
      @java.lang.Override
      public long getFileSize() {
        return fileSize_;
      }

      public static final int NAME_FIELD_NUMBER = 3;
      private org.apache.hadoop.thirdparty.protobuf.ByteString name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>optional bytes name = 3;</code>
       * @return Whether the name field is set.
       */
      @java.lang.Override
      public boolean hasName() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional bytes name = 3;</code>
       * @return The name.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getName() {
        return name_;
      }

      public static final int SNAPSHOTCOPY_FIELD_NUMBER = 4;
      private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile snapshotCopy_;
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
       * @return Whether the snapshotCopy field is set.
       */
      @java.lang.Override
      public boolean hasSnapshotCopy() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
       * @return The snapshotCopy.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getSnapshotCopy() {
        return snapshotCopy_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance() : snapshotCopy_;
      }
      /**
       * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder getSnapshotCopyOrBuilder() {
        return snapshotCopy_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance() : snapshotCopy_;
      }

      public static final int BLOCKS_FIELD_NUMBER = 5;
      @SuppressWarnings("serial")
      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> blocks_;
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
       */
      @java.lang.Override
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> getBlocksList() {
        return blocks_;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
       */
      @java.lang.Override
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
          getBlocksOrBuilderList() {
        return blocks_;
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
       */
      @java.lang.Override
      public int getBlocksCount() {
        return blocks_.size();
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) {
        return blocks_.get(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
          int index) {
        return blocks_.get(index);
      }

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        if (hasSnapshotCopy()) {
          if (!getSnapshotCopy().isInitialized()) {
            memoizedIsInitialized = 0;
            return false;
          }
        }
        for (int i = 0; i < getBlocksCount(); i++) {
          if (!getBlocks(i).isInitialized()) {
            memoizedIsInitialized = 0;
            return false;
          }
        }
        memoizedIsInitialized = 1;
        return true;
      }
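      // Note (not generated): isInitialized() above only fails when a nested message
      // (snapshotCopy or one of the blocks) is missing one of its own required fields;
      // every field declared directly on FileDiff is optional or repeated.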

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        if (((bitField0_ & 0x00000001) != 0)) {
          output.writeUInt32(1, snapshotId_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          output.writeUInt64(2, fileSize_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          output.writeBytes(3, name_);
        }
        if (((bitField0_ & 0x00000008) != 0)) {
          output.writeMessage(4, getSnapshotCopy());
        }
        for (int i = 0; i < blocks_.size(); i++) {
          output.writeMessage(5, blocks_.get(i));
        }
        getUnknownFields().writeTo(output);
      }

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt32Size(1, snapshotId_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64Size(2, fileSize_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeBytesSize(3, name_);
        }
        if (((bitField0_ & 0x00000008) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeMessageSize(4, getSnapshotCopy());
        }
        for (int i = 0; i < blocks_.size(); i++) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeMessageSize(5, blocks_.get(i));
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
          return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff) obj;

        if (hasSnapshotId() != other.hasSnapshotId()) return false;
        if (hasSnapshotId()) {
          if (getSnapshotId()
              != other.getSnapshotId()) return false;
        }
        if (hasFileSize() != other.hasFileSize()) return false;
        if (hasFileSize()) {
          if (getFileSize()
              != other.getFileSize()) return false;
        }
        if (hasName() != other.hasName()) return false;
        if (hasName()) {
          if (!getName()
              .equals(other.getName())) return false;
        }
        if (hasSnapshotCopy() != other.hasSnapshotCopy()) return false;
        if (hasSnapshotCopy()) {
          if (!getSnapshotCopy()
              .equals(other.getSnapshotCopy())) return false;
        }
        if (!getBlocksList()
            .equals(other.getBlocksList())) return false;
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (hasSnapshotId()) {
          hash = (37 * hash) + SNAPSHOTID_FIELD_NUMBER;
          hash = (53 * hash) + getSnapshotId();
        }
        if (hasFileSize()) {
          hash = (37 * hash) + FILESIZE_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getFileSize());
        }
        if (hasName()) {
          hash = (37 * hash) + NAME_FIELD_NUMBER;
          hash = (53 * hash) + getName().hashCode();
        }
        if (hasSnapshotCopy()) {
          hash = (37 * hash) + SNAPSHOTCOPY_FIELD_NUMBER;
          hash = (53 * hash) + getSnapshotCopy().hashCode();
        }
        if (getBlocksCount() > 0) {
          hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
          hash = (53 * hash) + getBlocksList().hashCode();
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

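      // Illustrative sketch (not part of the generated code): reading a FileDiff back
      // from bytes with one of the static parse helpers above. The byte[] source is a
      // hypothetical placeholder.
      //
      //   SnapshotDiffSection.FileDiff fd = SnapshotDiffSection.FileDiff.parseFrom(bytes);
      //   if (fd.hasFileSize()) {
      //     long size = fd.getFileSize();
      //   }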
      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.FileDiff}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.SnapshotDiffSection.FileDiff)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiffOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff.newBuilder()
        private Builder() {
          maybeForceBuilderInitialization();
        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);
          maybeForceBuilderInitialization();
        }
        private void maybeForceBuilderInitialization() {
          if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                  .alwaysUseFieldBuilders) {
            getSnapshotCopyFieldBuilder();
            getBlocksFieldBuilder();
          }
        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          snapshotId_ = 0;
          fileSize_ = 0L;
          name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
          snapshotCopy_ = null;
          if (snapshotCopyBuilder_ != null) {
            snapshotCopyBuilder_.dispose();
            snapshotCopyBuilder_ = null;
          }
          if (blocksBuilder_ == null) {
            blocks_ = java.util.Collections.emptyList();
          } else {
            blocks_ = null;
            blocksBuilder_.clear();
          }
          bitField0_ = (bitField0_ & ~0x00000010);
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff(this);
          buildPartialRepeatedFields(result);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff result) {
          if (blocksBuilder_ == null) {
            if (((bitField0_ & 0x00000010) != 0)) {
              blocks_ = java.util.Collections.unmodifiableList(blocks_);
              bitField0_ = (bitField0_ & ~0x00000010);
            }
            result.blocks_ = blocks_;
          } else {
            result.blocks_ = blocksBuilder_.build();
          }
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff result) {
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) != 0)) {
            result.snapshotId_ = snapshotId_;
            to_bitField0_ |= 0x00000001;
          }
          if (((from_bitField0_ & 0x00000002) != 0)) {
            result.fileSize_ = fileSize_;
            to_bitField0_ |= 0x00000002;
          }
          if (((from_bitField0_ & 0x00000004) != 0)) {
            result.name_ = name_;
            to_bitField0_ |= 0x00000004;
          }
          if (((from_bitField0_ & 0x00000008) != 0)) {
            result.snapshotCopy_ = snapshotCopyBuilder_ == null
                ? snapshotCopy_
                : snapshotCopyBuilder_.build();
            to_bitField0_ |= 0x00000008;
          }
          result.bitField0_ |= to_bitField0_;
        }
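        // Note: buildPartial0 above copies only the bits for fields that were
        // explicitly set on this builder into the message's own bitField0_, so
        // the has*() accessors of the built FileDiff reflect exactly what the
        // caller set.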

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff.getDefaultInstance()) return this;
          if (other.hasSnapshotId()) {
            setSnapshotId(other.getSnapshotId());
          }
          if (other.hasFileSize()) {
            setFileSize(other.getFileSize());
          }
          if (other.hasName()) {
            setName(other.getName());
          }
          if (other.hasSnapshotCopy()) {
            mergeSnapshotCopy(other.getSnapshotCopy());
          }
          if (blocksBuilder_ == null) {
            if (!other.blocks_.isEmpty()) {
              if (blocks_.isEmpty()) {
                blocks_ = other.blocks_;
                bitField0_ = (bitField0_ & ~0x00000010);
              } else {
                ensureBlocksIsMutable();
                blocks_.addAll(other.blocks_);
              }
              onChanged();
            }
          } else {
            if (!other.blocks_.isEmpty()) {
              if (blocksBuilder_.isEmpty()) {
                blocksBuilder_.dispose();
                blocksBuilder_ = null;
                blocks_ = other.blocks_;
                bitField0_ = (bitField0_ & ~0x00000010);
                blocksBuilder_ = 
                  org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                     getBlocksFieldBuilder() : null;
              } else {
                blocksBuilder_.addAllMessages(other.blocks_);
              }
            }
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }

        @java.lang.Override
        public final boolean isInitialized() {
          if (hasSnapshotCopy()) {
            if (!getSnapshotCopy().isInitialized()) {
              return false;
            }
          }
          for (int i = 0; i < getBlocksCount(); i++) {
            if (!getBlocks(i).isInitialized()) {
              return false;
            }
          }
          return true;
        }

        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 8: {
                  snapshotId_ = input.readUInt32();
                  bitField0_ |= 0x00000001;
                  break;
                } // case 8
                case 16: {
                  fileSize_ = input.readUInt64();
                  bitField0_ |= 0x00000002;
                  break;
                } // case 16
                case 26: {
                  name_ = input.readBytes();
                  bitField0_ |= 0x00000004;
                  break;
                } // case 26
                case 34: {
                  input.readMessage(
                      getSnapshotCopyFieldBuilder().getBuilder(),
                      extensionRegistry);
                  bitField0_ |= 0x00000008;
                  break;
                } // case 34
                case 42: {
                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto m =
                      input.readMessage(
                          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.PARSER,
                          extensionRegistry);
                  if (blocksBuilder_ == null) {
                    ensureBlocksIsMutable();
                    blocks_.add(m);
                  } else {
                    blocksBuilder_.addMessage(m);
                  }
                  break;
                } // case 42
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
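        // Note on the switch in mergeFrom above: each case value is the raw
        // protobuf wire tag, (field_number << 3) | wire_type. For FileDiff that
        // is 8 = snapshotId (field 1, varint), 16 = fileSize (field 2, varint),
        // 26 = name (field 3, length-delimited), 34 = snapshotCopy (field 4,
        // embedded message) and 42 = blocks (field 5, repeated message). Tag 0
        // means end of input, and unrecognized tags are routed to
        // parseUnknownField so they are preserved rather than dropped.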
        private int bitField0_;

        private int snapshotId_ ;
        /**
         * <code>optional uint32 snapshotId = 1;</code>
         * @return Whether the snapshotId field is set.
         */
        @java.lang.Override
        public boolean hasSnapshotId() {
          return ((bitField0_ & 0x00000001) != 0);
        }
        /**
         * <code>optional uint32 snapshotId = 1;</code>
         * @return The snapshotId.
         */
        @java.lang.Override
        public int getSnapshotId() {
          return snapshotId_;
        }
        /**
         * <code>optional uint32 snapshotId = 1;</code>
         * @param value The snapshotId to set.
         * @return This builder for chaining.
         */
        public Builder setSnapshotId(int value) {

          snapshotId_ = value;
          bitField0_ |= 0x00000001;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint32 snapshotId = 1;</code>
         * @return This builder for chaining.
         */
        public Builder clearSnapshotId() {
          bitField0_ = (bitField0_ & ~0x00000001);
          snapshotId_ = 0;
          onChanged();
          return this;
        }

        private long fileSize_ ;
        /**
         * <code>optional uint64 fileSize = 2;</code>
         * @return Whether the fileSize field is set.
         */
        @java.lang.Override
        public boolean hasFileSize() {
          return ((bitField0_ & 0x00000002) != 0);
        }
        /**
         * <code>optional uint64 fileSize = 2;</code>
         * @return The fileSize.
         */
        @java.lang.Override
        public long getFileSize() {
          return fileSize_;
        }
        /**
         * <code>optional uint64 fileSize = 2;</code>
         * @param value The fileSize to set.
         * @return This builder for chaining.
         */
        public Builder setFileSize(long value) {

          fileSize_ = value;
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 fileSize = 2;</code>
         * @return This builder for chaining.
         */
        public Builder clearFileSize() {
          bitField0_ = (bitField0_ & ~0x00000002);
          fileSize_ = 0L;
          onChanged();
          return this;
        }

        private org.apache.hadoop.thirdparty.protobuf.ByteString name_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        /**
         * <code>optional bytes name = 3;</code>
         * @return Whether the name field is set.
         */
        @java.lang.Override
        public boolean hasName() {
          return ((bitField0_ & 0x00000004) != 0);
        }
        /**
         * <code>optional bytes name = 3;</code>
         * @return The name.
         */
        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.ByteString getName() {
          return name_;
        }
        /**
         * <code>optional bytes name = 3;</code>
         * @param value The name to set.
         * @return This builder for chaining.
         */
        public Builder setName(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
          if (value == null) { throw new NullPointerException(); }
          name_ = value;
          bitField0_ |= 0x00000004;
          onChanged();
          return this;
        }
        /**
         * <code>optional bytes name = 3;</code>
         * @return This builder for chaining.
         */
        public Builder clearName() {
          bitField0_ = (bitField0_ & ~0x00000004);
          name_ = getDefaultInstance().getName();
          onChanged();
          return this;
        }

        private org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile snapshotCopy_;
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder> snapshotCopyBuilder_;
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
         * @return Whether the snapshotCopy field is set.
         */
        public boolean hasSnapshotCopy() {
          return ((bitField0_ & 0x00000008) != 0);
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
         * @return The snapshotCopy.
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile getSnapshotCopy() {
          if (snapshotCopyBuilder_ == null) {
            return snapshotCopy_ == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance() : snapshotCopy_;
          } else {
            return snapshotCopyBuilder_.getMessage();
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
         */
        public Builder setSnapshotCopy(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile value) {
          if (snapshotCopyBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            snapshotCopy_ = value;
          } else {
            snapshotCopyBuilder_.setMessage(value);
          }
          bitField0_ |= 0x00000008;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
         */
        public Builder setSnapshotCopy(
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder builderForValue) {
          if (snapshotCopyBuilder_ == null) {
            snapshotCopy_ = builderForValue.build();
          } else {
            snapshotCopyBuilder_.setMessage(builderForValue.build());
          }
          bitField0_ |= 0x00000008;
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
         */
        public Builder mergeSnapshotCopy(org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile value) {
          if (snapshotCopyBuilder_ == null) {
            if (((bitField0_ & 0x00000008) != 0) &&
              snapshotCopy_ != null &&
              snapshotCopy_ != org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance()) {
              getSnapshotCopyBuilder().mergeFrom(value);
            } else {
              snapshotCopy_ = value;
            }
          } else {
            snapshotCopyBuilder_.mergeFrom(value);
          }
          if (snapshotCopy_ != null) {
            bitField0_ |= 0x00000008;
            onChanged();
          }
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
         */
        public Builder clearSnapshotCopy() {
          bitField0_ = (bitField0_ & ~0x00000008);
          snapshotCopy_ = null;
          if (snapshotCopyBuilder_ != null) {
            snapshotCopyBuilder_.dispose();
            snapshotCopyBuilder_ = null;
          }
          onChanged();
          return this;
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder getSnapshotCopyBuilder() {
          bitField0_ |= 0x00000008;
          onChanged();
          return getSnapshotCopyFieldBuilder().getBuilder();
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
         */
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder getSnapshotCopyOrBuilder() {
          if (snapshotCopyBuilder_ != null) {
            return snapshotCopyBuilder_.getMessageOrBuilder();
          } else {
            return snapshotCopy_ == null ?
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.getDefaultInstance() : snapshotCopy_;
          }
        }
        /**
         * <code>optional .hadoop.hdfs.fsimage.INodeSection.INodeFile snapshotCopy = 4;</code>
         */
        private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
            org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder> 
            getSnapshotCopyFieldBuilder() {
          if (snapshotCopyBuilder_ == null) {
            snapshotCopyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile.Builder, org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFileOrBuilder>(
                    getSnapshotCopy(),
                    getParentForChildren(),
                    isClean());
            snapshotCopy_ = null;
          }
          return snapshotCopyBuilder_;
        }
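        // The single-field builder above is created lazily: until first use the
        // snapshotCopy message is held directly in snapshotCopy_; once the
        // builder exists it takes ownership (snapshotCopy_ is nulled) and all
        // subsequent reads and writes go through snapshotCopyBuilder_.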

        private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> blocks_ =
          java.util.Collections.emptyList();
        private void ensureBlocksIsMutable() {
          if (!((bitField0_ & 0x00000010) != 0)) {
            blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto>(blocks_);
            bitField0_ |= 0x00000010;
          }
        }

        private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> blocksBuilder_;

        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> getBlocksList() {
          if (blocksBuilder_ == null) {
            return java.util.Collections.unmodifiableList(blocks_);
          } else {
            return blocksBuilder_.getMessageList();
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public int getBlocksCount() {
          if (blocksBuilder_ == null) {
            return blocks_.size();
          } else {
            return blocksBuilder_.getCount();
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getBlocks(int index) {
          if (blocksBuilder_ == null) {
            return blocks_.get(index);
          } else {
            return blocksBuilder_.getMessage(index);
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public Builder setBlocks(
            int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
          if (blocksBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            ensureBlocksIsMutable();
            blocks_.set(index, value);
            onChanged();
          } else {
            blocksBuilder_.setMessage(index, value);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public Builder setBlocks(
            int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
          if (blocksBuilder_ == null) {
            ensureBlocksIsMutable();
            blocks_.set(index, builderForValue.build());
            onChanged();
          } else {
            blocksBuilder_.setMessage(index, builderForValue.build());
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
          if (blocksBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            ensureBlocksIsMutable();
            blocks_.add(value);
            onChanged();
          } else {
            blocksBuilder_.addMessage(value);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public Builder addBlocks(
            int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto value) {
          if (blocksBuilder_ == null) {
            if (value == null) {
              throw new NullPointerException();
            }
            ensureBlocksIsMutable();
            blocks_.add(index, value);
            onChanged();
          } else {
            blocksBuilder_.addMessage(index, value);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public Builder addBlocks(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
          if (blocksBuilder_ == null) {
            ensureBlocksIsMutable();
            blocks_.add(builderForValue.build());
            onChanged();
          } else {
            blocksBuilder_.addMessage(builderForValue.build());
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public Builder addBlocks(
            int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder builderForValue) {
          if (blocksBuilder_ == null) {
            ensureBlocksIsMutable();
            blocks_.add(index, builderForValue.build());
            onChanged();
          } else {
            blocksBuilder_.addMessage(index, builderForValue.build());
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public Builder addAllBlocks(
            java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto> values) {
          if (blocksBuilder_ == null) {
            ensureBlocksIsMutable();
            org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
                values, blocks_);
            onChanged();
          } else {
            blocksBuilder_.addAllMessages(values);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public Builder clearBlocks() {
          if (blocksBuilder_ == null) {
            blocks_ = java.util.Collections.emptyList();
            bitField0_ = (bitField0_ & ~0x00000010);
            onChanged();
          } else {
            blocksBuilder_.clear();
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public Builder removeBlocks(int index) {
          if (blocksBuilder_ == null) {
            ensureBlocksIsMutable();
            blocks_.remove(index);
            onChanged();
          } else {
            blocksBuilder_.remove(index);
          }
          return this;
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder getBlocksBuilder(
            int index) {
          return getBlocksFieldBuilder().getBuilder(index);
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder getBlocksOrBuilder(
            int index) {
          if (blocksBuilder_ == null) {
            return blocks_.get(index);
          } else {
            return blocksBuilder_.getMessageOrBuilder(index);
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
             getBlocksOrBuilderList() {
          if (blocksBuilder_ != null) {
            return blocksBuilder_.getMessageOrBuilderList();
          } else {
            return java.util.Collections.unmodifiableList(blocks_);
          }
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder() {
          return getBlocksFieldBuilder().addBuilder(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance());
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder addBlocksBuilder(
            int index) {
          return getBlocksFieldBuilder().addBuilder(
              index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance());
        }
        /**
         * <code>repeated .hadoop.hdfs.BlockProto blocks = 5;</code>
         */
        public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder> 
             getBlocksBuilderList() {
          return getBlocksFieldBuilder().getBuilderList();
        }
        private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder> 
            getBlocksFieldBuilder() {
          if (blocksBuilder_ == null) {
            blocksBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder>(
                    blocks_,
                    ((bitField0_ & 0x00000010) != 0),
                    getParentForChildren(),
                    isClean());
            blocks_ = null;
          }
          return blocksBuilder_;
        }
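        // As with snapshotCopy, the repeated-field builder above is created on
        // demand; it takes over the current blocks_ list, and the boolean
        // ((bitField0_ & 0x00000010) != 0) tells it whether that list is
        // already a private mutable copy or still the shared immutable one.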
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.FileDiff)
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.FileDiff)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<FileDiff>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<FileDiff>() {
        @java.lang.Override
        public FileDiff parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<FileDiff> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<FileDiff> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.FileDiff getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }
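    /*
     * Illustrative sketch (not part of the generated API): building,
     * serializing and re-parsing a FileDiff might look like the following.
     * The field values are made up for the example.
     *
     *   FsImageProto.SnapshotDiffSection.FileDiff diff =
     *       FsImageProto.SnapshotDiffSection.FileDiff.newBuilder()
     *           .setSnapshotId(3)
     *           .setFileSize(1024L)
     *           .setName(org.apache.hadoop.thirdparty.protobuf.ByteString
     *               .copyFromUtf8("part-00000"))
     *           .build();
     *   byte[] bytes = diff.toByteArray();
     *   FsImageProto.SnapshotDiffSection.FileDiff parsed =
     *       FsImageProto.SnapshotDiffSection.FileDiff.parseFrom(bytes);
     */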

    public interface DiffEntryOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <code>required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1;</code>
       * @return Whether the type field is set.
       */
      boolean hasType();
      /**
       * <code>required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1;</code>
       * @return The type.
       */
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type getType();

      /**
       * <code>optional uint64 inodeId = 2;</code>
       * @return Whether the inodeId field is set.
       */
      boolean hasInodeId();
      /**
       * <code>optional uint64 inodeId = 2;</code>
       * @return The inodeId.
       */
      long getInodeId();

      /**
       * <code>optional uint32 numOfDiff = 3;</code>
       * @return Whether the numOfDiff field is set.
       */
      boolean hasNumOfDiff();
      /**
       * <code>optional uint32 numOfDiff = 3;</code>
       * @return The numOfDiff.
       */
      int getNumOfDiff();
    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry}
     */
    public static final class DiffEntry extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry)
        DiffEntryOrBuilder {
    private static final long serialVersionUID = 0L;
      // Use DiffEntry.newBuilder() to construct.
      private DiffEntry(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private DiffEntry() {
        type_ = 1;
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new DiffEntry();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Builder.class);
      }

      /**
       * Protobuf enum {@code hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type}
       */
      public enum Type
          implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
        /**
         * <code>FILEDIFF = 1;</code>
         */
        FILEDIFF(1),
        /**
         * <code>DIRECTORYDIFF = 2;</code>
         */
        DIRECTORYDIFF(2),
        ;

        /**
         * <code>FILEDIFF = 1;</code>
         */
        public static final int FILEDIFF_VALUE = 1;
        /**
         * <code>DIRECTORYDIFF = 2;</code>
         */
        public static final int DIRECTORYDIFF_VALUE = 2;


        public final int getNumber() {
          return value;
        }

        /**
         * @param value The numeric wire value of the corresponding enum entry.
         * @return The enum associated with the given numeric wire value.
         * @deprecated Use {@link #forNumber(int)} instead.
         */
        @java.lang.Deprecated
        public static Type valueOf(int value) {
          return forNumber(value);
        }

        /**
         * @param value The numeric wire value of the corresponding enum entry.
         * @return The enum associated with the given numeric wire value.
         */
        public static Type forNumber(int value) {
          switch (value) {
            case 1: return FILEDIFF;
            case 2: return DIRECTORYDIFF;
            default: return null;
          }
        }

        public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<Type>
            internalGetValueMap() {
          return internalValueMap;
        }
        private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
            Type> internalValueMap =
              new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<Type>() {
                public Type findValueByNumber(int number) {
                  return Type.forNumber(number);
                }
              };

        public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
            getValueDescriptor() {
          return getDescriptor().getValues().get(ordinal());
        }
        public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
            getDescriptorForType() {
          return getDescriptor();
        }
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.getDescriptor().getEnumTypes().get(0);
        }

        private static final Type[] VALUES = values();

        public static Type valueOf(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
          if (desc.getType() != getDescriptor()) {
            throw new java.lang.IllegalArgumentException(
              "EnumValueDescriptor is not for this type.");
          }
          return VALUES[desc.getIndex()];
        }

        private final int value;

        private Type(int value) {
          this.value = value;
        }

        // @@protoc_insertion_point(enum_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type)
      }
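      // Note: Type.forNumber(int) returns null for a numeric value this proto
      // version does not define; getType() below falls back to FILEDIFF in
      // that case, while the parser keeps the original number in the
      // unknown-field set (see the builder's mergeFrom).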

      private int bitField0_;
      public static final int TYPE_FIELD_NUMBER = 1;
      private int type_ = 1;
      /**
       * <code>required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1;</code>
       * @return Whether the type field is set.
       */
      @java.lang.Override public boolean hasType() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1;</code>
       * @return The type.
       */
      @java.lang.Override public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type getType() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type result = org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type.forNumber(type_);
        return result == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type.FILEDIFF : result;
      }

      public static final int INODEID_FIELD_NUMBER = 2;
      private long inodeId_ = 0L;
      /**
       * <code>optional uint64 inodeId = 2;</code>
       * @return Whether the inodeId field is set.
       */
      @java.lang.Override
      public boolean hasInodeId() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional uint64 inodeId = 2;</code>
       * @return The inodeId.
       */
      @java.lang.Override
      public long getInodeId() {
        return inodeId_;
      }

      public static final int NUMOFDIFF_FIELD_NUMBER = 3;
      private int numOfDiff_ = 0;
      /**
       * <code>optional uint32 numOfDiff = 3;</code>
       * @return Whether the numOfDiff field is set.
       */
      @java.lang.Override
      public boolean hasNumOfDiff() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional uint32 numOfDiff = 3;</code>
       * @return The numOfDiff.
       */
      @java.lang.Override
      public int getNumOfDiff() {
        return numOfDiff_;
      }

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        if (!hasType()) {
          memoizedIsInitialized = 0;
          return false;
        }
        memoizedIsInitialized = 1;
        return true;
      }
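      // type is the only required field of DiffEntry, so initialization checks
      // nothing else; the result is cached in memoizedIsInitialized
      // (-1 = not yet computed, 0 = false, 1 = true).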

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        if (((bitField0_ & 0x00000001) != 0)) {
          output.writeEnum(1, type_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          output.writeUInt64(2, inodeId_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          output.writeUInt32(3, numOfDiff_);
        }
        getUnknownFields().writeTo(output);
      }

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeEnumSize(1, type_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64Size(2, inodeId_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt32Size(3, numOfDiff_);
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
          return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry) obj;

        if (hasType() != other.hasType()) return false;
        if (hasType()) {
          if (type_ != other.type_) return false;
        }
        if (hasInodeId() != other.hasInodeId()) return false;
        if (hasInodeId()) {
          if (getInodeId()
              != other.getInodeId()) return false;
        }
        if (hasNumOfDiff() != other.hasNumOfDiff()) return false;
        if (hasNumOfDiff()) {
          if (getNumOfDiff()
              != other.getNumOfDiff()) return false;
        }
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (hasType()) {
          hash = (37 * hash) + TYPE_FIELD_NUMBER;
          hash = (53 * hash) + type_;
        }
        if (hasInodeId()) {
          hash = (37 * hash) + INODEID_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getInodeId());
        }
        if (hasNumOfDiff()) {
          hash = (37 * hash) + NUMOFDIFF_FIELD_NUMBER;
          hash = (53 * hash) + getNumOfDiff();
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }
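      /*
       * Illustrative sketch (not part of the generated API): the delimited
       * variants above read one length-prefixed message at a time, which suits
       * an input stream holding a sequence of DiffEntry records;
       * parseDelimitedFrom returns null at end of stream. The stream variable
       * below is hypothetical.
       *
       *   java.io.InputStream in = ...;
       *   FsImageProto.SnapshotDiffSection.DiffEntry entry;
       *   while ((entry = FsImageProto.SnapshotDiffSection.DiffEntry
       *       .parseDelimitedFrom(in)) != null) {
       *     if (entry.hasInodeId()) {
       *       long inodeId = entry.getInodeId();
       *     }
       *   }
       */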

      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntryOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.newBuilder()
        private Builder() {

        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);

        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          type_ = 1;
          inodeId_ = 0L;
          numOfDiff_ = 0;
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry(this);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry result) {
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) != 0)) {
            result.type_ = type_;
            to_bitField0_ |= 0x00000001;
          }
          if (((from_bitField0_ & 0x00000002) != 0)) {
            result.inodeId_ = inodeId_;
            to_bitField0_ |= 0x00000002;
          }
          if (((from_bitField0_ & 0x00000004) != 0)) {
            result.numOfDiff_ = numOfDiff_;
            to_bitField0_ |= 0x00000004;
          }
          result.bitField0_ |= to_bitField0_;
        }

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.getDefaultInstance()) return this;
          if (other.hasType()) {
            setType(other.getType());
          }
          if (other.hasInodeId()) {
            setInodeId(other.getInodeId());
          }
          if (other.hasNumOfDiff()) {
            setNumOfDiff(other.getNumOfDiff());
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }

        @java.lang.Override
        public final boolean isInitialized() {
          if (!hasType()) {
            return false;
          }
          return true;
        }

        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 8: {
                  int tmpRaw = input.readEnum();
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type tmpValue =
                      org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type.forNumber(tmpRaw);
                  if (tmpValue == null) {
                    mergeUnknownVarintField(1, tmpRaw);
                  } else {
                    type_ = tmpRaw;
                    bitField0_ |= 0x00000001;
                  }
                  break;
                } // case 8
                case 16: {
                  inodeId_ = input.readUInt64();
                  bitField0_ |= 0x00000002;
                  break;
                } // case 16
                case 24: {
                  numOfDiff_ = input.readUInt32();
                  bitField0_ |= 0x00000004;
                  break;
                } // case 24
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
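        // In case 8 above, an enum number that Type.forNumber does not
        // recognize is stored via mergeUnknownVarintField instead of being
        // assigned to type_, so unknown values survive a parse/serialize
        // round trip.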
        private int bitField0_;

        private int type_ = 1;
        /**
         * <code>required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1;</code>
         * @return Whether the type field is set.
         */
        @java.lang.Override public boolean hasType() {
          return ((bitField0_ & 0x00000001) != 0);
        }
        /**
         * <code>required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1;</code>
         * @return The type.
         */
        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type getType() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type result = org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type.forNumber(type_);
          return result == null ? org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type.FILEDIFF : result;
        }
        /**
         * <code>required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1;</code>
         * @param value The type to set.
         * @return This builder for chaining.
         */
        public Builder setType(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry.Type value) {
          if (value == null) {
            throw new NullPointerException();
          }
          bitField0_ |= 0x00000001;
          type_ = value.getNumber();
          onChanged();
          return this;
        }
        /**
         * <code>required .hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry.Type type = 1;</code>
         * @return This builder for chaining.
         */
        public Builder clearType() {
          bitField0_ = (bitField0_ & ~0x00000001);
          type_ = 1;
          onChanged();
          return this;
        }

        private long inodeId_ ;
        /**
         * <code>optional uint64 inodeId = 2;</code>
         * @return Whether the inodeId field is set.
         */
        @java.lang.Override
        public boolean hasInodeId() {
          return ((bitField0_ & 0x00000002) != 0);
        }
        /**
         * <code>optional uint64 inodeId = 2;</code>
         * @return The inodeId.
         */
        @java.lang.Override
        public long getInodeId() {
          return inodeId_;
        }
        /**
         * <code>optional uint64 inodeId = 2;</code>
         * @param value The inodeId to set.
         * @return This builder for chaining.
         */
        public Builder setInodeId(long value) {

          inodeId_ = value;
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 inodeId = 2;</code>
         * @return This builder for chaining.
         */
        public Builder clearInodeId() {
          bitField0_ = (bitField0_ & ~0x00000002);
          inodeId_ = 0L;
          onChanged();
          return this;
        }

        private int numOfDiff_ ;
        /**
         * <code>optional uint32 numOfDiff = 3;</code>
         * @return Whether the numOfDiff field is set.
         */
        @java.lang.Override
        public boolean hasNumOfDiff() {
          return ((bitField0_ & 0x00000004) != 0);
        }
        /**
         * <code>optional uint32 numOfDiff = 3;</code>
         * @return The numOfDiff.
         */
        @java.lang.Override
        public int getNumOfDiff() {
          return numOfDiff_;
        }
        /**
         * <code>optional uint32 numOfDiff = 3;</code>
         * @param value The numOfDiff to set.
         * @return This builder for chaining.
         */
        public Builder setNumOfDiff(int value) {

          numOfDiff_ = value;
          bitField0_ |= 0x00000004;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint32 numOfDiff = 3;</code>
         * @return This builder for chaining.
         */
        public Builder clearNumOfDiff() {
          bitField0_ = (bitField0_ & ~0x00000004);
          numOfDiff_ = 0;
          onChanged();
          return this;
        }
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry)
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SnapshotDiffSection.DiffEntry)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DiffEntry>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DiffEntry>() {
        @java.lang.Override
        public DiffEntry parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<DiffEntry> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<DiffEntry> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }
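    // Illustrative sketch (not part of the generated code): a DiffEntry is
    // normally assembled through its Builder using the accessors defined
    // above. The field values below are placeholders.
    //
    //   SnapshotDiffSection.DiffEntry entry =
    //       SnapshotDiffSection.DiffEntry.newBuilder()
    //           .setType(SnapshotDiffSection.DiffEntry.Type.FILEDIFF)
    //           .setInodeId(16385L)   // placeholder inode id
    //           .setNumOfDiff(1)      // placeholder diff count
    //           .build();
    //
    // build() enforces the required 'type' field via isInitialized().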

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection) obj;

      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * This section records information about snapshot diffs
     * NAME: SNAPSHOT_DIFF
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.fsimage.SnapshotDiffSection}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.SnapshotDiffSection)
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSectionOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection build() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection buildPartial() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection(this);
        onBuilt();
        return result;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection) {
          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection other) {
        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SnapshotDiffSection)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SnapshotDiffSection)
    private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection();
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotDiffSection>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<SnapshotDiffSection>() {
      @java.lang.Override
      public SnapshotDiffSection parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotDiffSection> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotDiffSection> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
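  // Illustrative sketch (an assumption about how the image stream is laid
  // out, not something this generated class enforces): the SnapshotDiffSection
  // message itself carries no fields, and the individual DiffEntry records are
  // commonly read from the same stream as length-delimited messages after it.
  //
  //   SnapshotDiffSection section =
  //       SnapshotDiffSection.parseDelimitedFrom(in);
  //   SnapshotDiffSection.DiffEntry entry =
  //       SnapshotDiffSection.DiffEntry.parseDelimitedFrom(in);
  //
  // Here 'in' stands for a java.io.InputStream positioned at the section's
  // data; parseDelimitedFrom is the standard generated parsing entry point.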

  public interface StringTableSectionOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.StringTableSection)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>optional uint32 numEntry = 1;</code>
     * @return Whether the numEntry field is set.
     */
    boolean hasNumEntry();
    /**
     * <code>optional uint32 numEntry = 1;</code>
     * @return The numEntry.
     */
    int getNumEntry();

    /**
     * <pre>
     * repeated Entry
     * </pre>
     *
     * <code>optional uint32 maskBits = 2 [default = 0];</code>
     * @return Whether the maskBits field is set.
     */
    boolean hasMaskBits();
    /**
     * <pre>
     * repeated Entry
     * </pre>
     *
     * <code>optional uint32 maskBits = 2 [default = 0];</code>
     * @return The maskBits.
     */
    int getMaskBits();
  }
  /**
   * <pre>
   **
   * This section maps strings to ids
   * NAME: STRING_TABLE
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.fsimage.StringTableSection}
   */
  public static final class StringTableSection extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.StringTableSection)
      StringTableSectionOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use StringTableSection.newBuilder() to construct.
    private StringTableSection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private StringTableSection() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new StringTableSection();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Builder.class);
    }

    public interface EntryOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.StringTableSection.Entry)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <code>optional uint32 id = 1;</code>
       * @return Whether the id field is set.
       */
      boolean hasId();
      /**
       * <code>optional uint32 id = 1;</code>
       * @return The id.
       */
      int getId();

      /**
       * <code>optional string str = 2;</code>
       * @return Whether the str field is set.
       */
      boolean hasStr();
      /**
       * <code>optional string str = 2;</code>
       * @return The str.
       */
      java.lang.String getStr();
      /**
       * <code>optional string str = 2;</code>
       * @return The bytes for str.
       */
      org.apache.hadoop.thirdparty.protobuf.ByteString
          getStrBytes();
    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.StringTableSection.Entry}
     */
    public static final class Entry extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.StringTableSection.Entry)
        EntryOrBuilder {
    private static final long serialVersionUID = 0L;
      // Use Entry.newBuilder() to construct.
      private Entry(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private Entry() {
        str_ = "";
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new Entry();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry.Builder.class);
      }

      private int bitField0_;
      public static final int ID_FIELD_NUMBER = 1;
      private int id_ = 0;
      /**
       * <code>optional uint32 id = 1;</code>
       * @return Whether the id field is set.
       */
      @java.lang.Override
      public boolean hasId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional uint32 id = 1;</code>
       * @return The id.
       */
      @java.lang.Override
      public int getId() {
        return id_;
      }

      public static final int STR_FIELD_NUMBER = 2;
      @SuppressWarnings("serial")
      private volatile java.lang.Object str_ = "";
      /**
       * <code>optional string str = 2;</code>
       * @return Whether the str field is set.
       */
      @java.lang.Override
      public boolean hasStr() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional string str = 2;</code>
       * @return The str.
       */
      @java.lang.Override
      public java.lang.String getStr() {
        java.lang.Object ref = str_;
        if (ref instanceof java.lang.String) {
          return (java.lang.String) ref;
        } else {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            str_ = s;
          }
          return s;
        }
      }
      /**
       * <code>optional string str = 2;</code>
       * @return The bytes for str.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getStrBytes() {
        java.lang.Object ref = str_;
        if (ref instanceof java.lang.String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          str_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        memoizedIsInitialized = 1;
        return true;
      }

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        if (((bitField0_ & 0x00000001) != 0)) {
          output.writeUInt32(1, id_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, str_);
        }
        getUnknownFields().writeTo(output);
      }

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt32Size(1, id_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, str_);
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
         return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry) obj;

        if (hasId() != other.hasId()) return false;
        if (hasId()) {
          if (getId()
              != other.getId()) return false;
        }
        if (hasStr() != other.hasStr()) return false;
        if (hasStr()) {
          if (!getStr()
              .equals(other.getStr())) return false;
        }
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (hasId()) {
          hash = (37 * hash) + ID_FIELD_NUMBER;
          hash = (53 * hash) + getId();
        }
        if (hasStr()) {
          hash = (37 * hash) + STR_FIELD_NUMBER;
          hash = (53 * hash) + getStr().hashCode();
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.StringTableSection.Entry}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.StringTableSection.Entry)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.EntryOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry.newBuilder()
        private Builder() {

        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);

        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          id_ = 0;
          str_ = "";
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry(this);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry result) {
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) != 0)) {
            result.id_ = id_;
            to_bitField0_ |= 0x00000001;
          }
          if (((from_bitField0_ & 0x00000002) != 0)) {
            result.str_ = str_;
            to_bitField0_ |= 0x00000002;
          }
          result.bitField0_ |= to_bitField0_;
        }

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry.getDefaultInstance()) return this;
          if (other.hasId()) {
            setId(other.getId());
          }
          if (other.hasStr()) {
            str_ = other.str_;
            bitField0_ |= 0x00000002;
            onChanged();
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }

        @java.lang.Override
        public final boolean isInitialized() {
          return true;
        }

        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 8: {
                  id_ = input.readUInt32();
                  bitField0_ |= 0x00000001;
                  break;
                } // case 8
                case 18: {
                  str_ = input.readBytes();
                  bitField0_ |= 0x00000002;
                  break;
                } // case 18
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
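        // Note on the case labels in mergeFrom above: case 8 is field 1 (id)
        // as a varint ((1 << 3) | 0), while case 18 is field 2 (str) as a
        // length-delimited value ((2 << 3) | 2), which is why it is read with
        // readBytes() rather than a varint reader.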
        private int bitField0_;

        private int id_ ;
        /**
         * <code>optional uint32 id = 1;</code>
         * @return Whether the id field is set.
         */
        @java.lang.Override
        public boolean hasId() {
          return ((bitField0_ & 0x00000001) != 0);
        }
        /**
         * <code>optional uint32 id = 1;</code>
         * @return The id.
         */
        @java.lang.Override
        public int getId() {
          return id_;
        }
        /**
         * <code>optional uint32 id = 1;</code>
         * @param value The id to set.
         * @return This builder for chaining.
         */
        public Builder setId(int value) {

          id_ = value;
          bitField0_ |= 0x00000001;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint32 id = 1;</code>
         * @return This builder for chaining.
         */
        public Builder clearId() {
          bitField0_ = (bitField0_ & ~0x00000001);
          id_ = 0;
          onChanged();
          return this;
        }

        private java.lang.Object str_ = "";
        /**
         * <code>optional string str = 2;</code>
         * @return Whether the str field is set.
         */
        public boolean hasStr() {
          return ((bitField0_ & 0x00000002) != 0);
        }
        /**
         * <code>optional string str = 2;</code>
         * @return The str.
         */
        public java.lang.String getStr() {
          java.lang.Object ref = str_;
          if (!(ref instanceof java.lang.String)) {
            org.apache.hadoop.thirdparty.protobuf.ByteString bs =
                (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
            java.lang.String s = bs.toStringUtf8();
            if (bs.isValidUtf8()) {
              str_ = s;
            }
            return s;
          } else {
            return (java.lang.String) ref;
          }
        }
        /**
         * <code>optional string str = 2;</code>
         * @return The bytes for str.
         */
        public org.apache.hadoop.thirdparty.protobuf.ByteString
            getStrBytes() {
          java.lang.Object ref = str_;
          if (ref instanceof String) {
            org.apache.hadoop.thirdparty.protobuf.ByteString b = 
                org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                    (java.lang.String) ref);
            str_ = b;
            return b;
          } else {
            return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          }
        }
        /**
         * <code>optional string str = 2;</code>
         * @param value The str to set.
         * @return This builder for chaining.
         */
        public Builder setStr(
            java.lang.String value) {
          if (value == null) { throw new NullPointerException(); }
          str_ = value;
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        /**
         * <code>optional string str = 2;</code>
         * @return This builder for chaining.
         */
        public Builder clearStr() {
          str_ = getDefaultInstance().getStr();
          bitField0_ = (bitField0_ & ~0x00000002);
          onChanged();
          return this;
        }
        /**
         * <code>optional string str = 2;</code>
         * @param value The bytes for str to set.
         * @return This builder for chaining.
         */
        public Builder setStrBytes(
            org.apache.hadoop.thirdparty.protobuf.ByteString value) {
          if (value == null) { throw new NullPointerException(); }
          str_ = value;
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.StringTableSection.Entry)
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.StringTableSection.Entry)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<Entry>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<Entry>() {
        @java.lang.Override
        public Entry parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<Entry> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<Entry> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Entry getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }
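    // Illustrative sketch (not part of the generated code): building a string
    // table entry with the Builder accessors defined above. The id and string
    // are placeholders. The entries that the "repeated Entry" comment on
    // maskBits alludes to are, as an assumption about the image layout rather
    // than anything enforced here, written to the stream separately from the
    // StringTableSection header message.
    //
    //   StringTableSection.Entry entry =
    //       StringTableSection.Entry.newBuilder()
    //           .setId(1)                 // placeholder id
    //           .setStr("supergroup")     // placeholder string
    //           .build();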

    private int bitField0_;
    public static final int NUMENTRY_FIELD_NUMBER = 1;
    private int numEntry_ = 0;
    /**
     * <code>optional uint32 numEntry = 1;</code>
     * @return Whether the numEntry field is set.
     */
    @java.lang.Override
    public boolean hasNumEntry() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional uint32 numEntry = 1;</code>
     * @return The numEntry.
     */
    @java.lang.Override
    public int getNumEntry() {
      return numEntry_;
    }

    public static final int MASKBITS_FIELD_NUMBER = 2;
    private int maskBits_ = 0;
    /**
     * <pre>
     * repeated Entry
     * </pre>
     *
     * <code>optional uint32 maskBits = 2 [default = 0];</code>
     * @return Whether the maskBits field is set.
     */
    @java.lang.Override
    public boolean hasMaskBits() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <pre>
     * repeated Entry
     * </pre>
     *
     * <code>optional uint32 maskBits = 2 [default = 0];</code>
     * @return The maskBits.
     */
    @java.lang.Override
    public int getMaskBits() {
      return maskBits_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt32(1, numEntry_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt32(2, maskBits_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(1, numEntry_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(2, maskBits_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection) obj;

      if (hasNumEntry() != other.hasNumEntry()) return false;
      if (hasNumEntry()) {
        if (getNumEntry()
            != other.getNumEntry()) return false;
      }
      if (hasMaskBits() != other.hasMaskBits()) return false;
      if (hasMaskBits()) {
        if (getMaskBits()
            != other.getMaskBits()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasNumEntry()) {
        hash = (37 * hash) + NUMENTRY_FIELD_NUMBER;
        hash = (53 * hash) + getNumEntry();
      }
      if (hasMaskBits()) {
        hash = (37 * hash) + MASKBITS_FIELD_NUMBER;
        hash = (53 * hash) + getMaskBits();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
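
    /**
     * Illustrative sketch only (not emitted by protoc): parseFrom(InputStream)
     * consumes the rest of the stream as a single message, while
     * parseDelimitedFrom(InputStream) first reads a varint length prefix and
     * then exactly that many bytes, so several messages can share one stream.
     * The helper below simply demonstrates the plain variant.
     */
    private static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection exampleParse(
        java.io.InputStream in) throws java.io.IOException {
      // Assumes the stream contains exactly one serialized StringTableSection.
      return parseFrom(in);
    }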

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     * This section maps strings to ids.
     * NAME: STRING_TABLE
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.fsimage.StringTableSection}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.StringTableSection)
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSectionOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        numEntry_ = 0;
        maskBits_ = 0;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_StringTableSection_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection build() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection buildPartial() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.numEntry_ = numEntry_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.maskBits_ = maskBits_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection) {
          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection other) {
        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection.getDefaultInstance()) return this;
        if (other.hasNumEntry()) {
          setNumEntry(other.getNumEntry());
        }
        if (other.hasMaskBits()) {
          setMaskBits(other.getMaskBits());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                numEntry_ = input.readUInt32();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 16: {
                maskBits_ = input.readUInt32();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
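
      // Note on the tag values in mergeFrom above: a protobuf tag is
      // (field_number << 3) | wire_type, so 8 means field 1 as a varint
      // (numEntry) and 16 means field 2 as a varint (maskBits). Tag 0 marks
      // end of input, and unknown tags are preserved via parseUnknownField.
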
      private int bitField0_;

      private int numEntry_ ;
      /**
       * <code>optional uint32 numEntry = 1;</code>
       * @return Whether the numEntry field is set.
       */
      @java.lang.Override
      public boolean hasNumEntry() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional uint32 numEntry = 1;</code>
       * @return The numEntry.
       */
      @java.lang.Override
      public int getNumEntry() {
        return numEntry_;
      }
      /**
       * <code>optional uint32 numEntry = 1;</code>
       * @param value The numEntry to set.
       * @return This builder for chaining.
       */
      public Builder setNumEntry(int value) {

        numEntry_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint32 numEntry = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearNumEntry() {
        bitField0_ = (bitField0_ & ~0x00000001);
        numEntry_ = 0;
        onChanged();
        return this;
      }

      private int maskBits_ ;
      /**
       * <pre>
       * repeated Entry
       * </pre>
       *
       * <code>optional uint32 maskBits = 2 [default = 0];</code>
       * @return Whether the maskBits field is set.
       */
      @java.lang.Override
      public boolean hasMaskBits() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       * repeated Entry
       * </pre>
       *
       * <code>optional uint32 maskBits = 2 [default = 0];</code>
       * @return The maskBits.
       */
      @java.lang.Override
      public int getMaskBits() {
        return maskBits_;
      }
      /**
       * <pre>
       * repeated Entry
       * </pre>
       *
       * <code>optional uint32 maskBits = 2 [default = 0];</code>
       * @param value The maskBits to set.
       * @return This builder for chaining.
       */
      public Builder setMaskBits(int value) {

        maskBits_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * repeated Entry
       * </pre>
       *
       * <code>optional uint32 maskBits = 2 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearMaskBits() {
        bitField0_ = (bitField0_ & ~0x00000002);
        maskBits_ = 0;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.StringTableSection)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.StringTableSection)
    private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection();
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<StringTableSection>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<StringTableSection>() {
      @java.lang.Override
      public StringTableSection parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<StringTableSection> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<StringTableSection> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
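
  /**
   * Illustrative sketch only (not emitted by protoc): builds a
   * StringTableSection and round-trips it through its serialized form.
   * The field values are hypothetical placeholders.
   */
  private static StringTableSection exampleStringTableRoundTrip()
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    StringTableSection section = StringTableSection.newBuilder()
        .setNumEntry(42)   // hypothetical entry count
        .setMaskBits(0)    // explicit default mask width
        .build();
    // toByteArray() serializes the message; parseFrom() reverses it.
    return StringTableSection.parseFrom(section.toByteArray());
  }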

  public interface SecretManagerSectionOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.SecretManagerSection)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>optional uint32 currentId = 1;</code>
     * @return Whether the currentId field is set.
     */
    boolean hasCurrentId();
    /**
     * <code>optional uint32 currentId = 1;</code>
     * @return The currentId.
     */
    int getCurrentId();

    /**
     * <code>optional uint32 tokenSequenceNumber = 2;</code>
     * @return Whether the tokenSequenceNumber field is set.
     */
    boolean hasTokenSequenceNumber();
    /**
     * <code>optional uint32 tokenSequenceNumber = 2;</code>
     * @return The tokenSequenceNumber.
     */
    int getTokenSequenceNumber();

    /**
     * <code>optional uint32 numKeys = 3;</code>
     * @return Whether the numKeys field is set.
     */
    boolean hasNumKeys();
    /**
     * <code>optional uint32 numKeys = 3;</code>
     * @return The numKeys.
     */
    int getNumKeys();

    /**
     * <pre>
     * repeated DelegationKey keys
     * repeated PersistToken tokens
     * </pre>
     *
     * <code>optional uint32 numTokens = 4;</code>
     * @return Whether the numTokens field is set.
     */
    boolean hasNumTokens();
    /**
     * <pre>
     * repeated DelegationKey keys
     * repeated PersistToken tokens
     * </pre>
     *
     * <code>optional uint32 numTokens = 4;</code>
     * @return The numTokens.
     */
    int getNumTokens();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.fsimage.SecretManagerSection}
   */
  public static final class SecretManagerSection extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.SecretManagerSection)
      SecretManagerSectionOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use SecretManagerSection.newBuilder() to construct.
    private SecretManagerSection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private SecretManagerSection() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new SecretManagerSection();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.Builder.class);
    }

    public interface DelegationKeyOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.SecretManagerSection.DelegationKey)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <code>optional uint32 id = 1;</code>
       * @return Whether the id field is set.
       */
      boolean hasId();
      /**
       * <code>optional uint32 id = 1;</code>
       * @return The id.
       */
      int getId();

      /**
       * <code>optional uint64 expiryDate = 2;</code>
       * @return Whether the expiryDate field is set.
       */
      boolean hasExpiryDate();
      /**
       * <code>optional uint64 expiryDate = 2;</code>
       * @return The expiryDate.
       */
      long getExpiryDate();

      /**
       * <code>optional bytes key = 3;</code>
       * @return Whether the key field is set.
       */
      boolean hasKey();
      /**
       * <code>optional bytes key = 3;</code>
       * @return The key.
       */
      org.apache.hadoop.thirdparty.protobuf.ByteString getKey();
    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.SecretManagerSection.DelegationKey}
     */
    public static final class DelegationKey extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.SecretManagerSection.DelegationKey)
        DelegationKeyOrBuilder {
    private static final long serialVersionUID = 0L;
      // Use DelegationKey.newBuilder() to construct.
      private DelegationKey(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private DelegationKey() {
        key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new DelegationKey();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey.Builder.class);
      }

      private int bitField0_;
      public static final int ID_FIELD_NUMBER = 1;
      private int id_ = 0;
      /**
       * <code>optional uint32 id = 1;</code>
       * @return Whether the id field is set.
       */
      @java.lang.Override
      public boolean hasId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional uint32 id = 1;</code>
       * @return The id.
       */
      @java.lang.Override
      public int getId() {
        return id_;
      }

      public static final int EXPIRYDATE_FIELD_NUMBER = 2;
      private long expiryDate_ = 0L;
      /**
       * <code>optional uint64 expiryDate = 2;</code>
       * @return Whether the expiryDate field is set.
       */
      @java.lang.Override
      public boolean hasExpiryDate() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional uint64 expiryDate = 2;</code>
       * @return The expiryDate.
       */
      @java.lang.Override
      public long getExpiryDate() {
        return expiryDate_;
      }

      public static final int KEY_FIELD_NUMBER = 3;
      private org.apache.hadoop.thirdparty.protobuf.ByteString key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>optional bytes key = 3;</code>
       * @return Whether the key field is set.
       */
      @java.lang.Override
      public boolean hasKey() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional bytes key = 3;</code>
       * @return The key.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getKey() {
        return key_;
      }

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        memoizedIsInitialized = 1;
        return true;
      }

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        if (((bitField0_ & 0x00000001) != 0)) {
          output.writeUInt32(1, id_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          output.writeUInt64(2, expiryDate_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          output.writeBytes(3, key_);
        }
        getUnknownFields().writeTo(output);
      }

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt32Size(1, id_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64Size(2, expiryDate_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeBytesSize(3, key_);
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }
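
      // Note: getSerializedSize() caches its result in memoizedSize, so the
      // per-field size computation above runs at most once per instance.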

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
          return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey) obj;

        if (hasId() != other.hasId()) return false;
        if (hasId()) {
          if (getId()
              != other.getId()) return false;
        }
        if (hasExpiryDate() != other.hasExpiryDate()) return false;
        if (hasExpiryDate()) {
          if (getExpiryDate()
              != other.getExpiryDate()) return false;
        }
        if (hasKey() != other.hasKey()) return false;
        if (hasKey()) {
          if (!getKey()
              .equals(other.getKey())) return false;
        }
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (hasId()) {
          hash = (37 * hash) + ID_FIELD_NUMBER;
          hash = (53 * hash) + getId();
        }
        if (hasExpiryDate()) {
          hash = (37 * hash) + EXPIRYDATE_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getExpiryDate());
        }
        if (hasKey()) {
          hash = (37 * hash) + KEY_FIELD_NUMBER;
          hash = (53 * hash) + getKey().hashCode();
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }
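
      /**
       * Illustrative sketch only (not emitted by protoc): writes a
       * DelegationKey with a varint length prefix and reads it back, which is
       * how several messages could share one stream. Uses only the generated
       * writeDelimitedTo/parseDelimitedFrom entry points.
       */
      private static DelegationKey exampleDelimitedRoundTrip(DelegationKey key)
          throws java.io.IOException {
        java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
        key.writeDelimitedTo(out);        // length prefix + message bytes
        java.io.ByteArrayInputStream in =
            new java.io.ByteArrayInputStream(out.toByteArray());
        return parseDelimitedFrom(in);    // reads exactly one message
      }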

      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.SecretManagerSection.DelegationKey}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.SecretManagerSection.DelegationKey)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKeyOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey.newBuilder()
        private Builder() {

        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);

        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          id_ = 0;
          expiryDate_ = 0L;
          key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey(this);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey result) {
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) != 0)) {
            result.id_ = id_;
            to_bitField0_ |= 0x00000001;
          }
          if (((from_bitField0_ & 0x00000002) != 0)) {
            result.expiryDate_ = expiryDate_;
            to_bitField0_ |= 0x00000002;
          }
          if (((from_bitField0_ & 0x00000004) != 0)) {
            result.key_ = key_;
            to_bitField0_ |= 0x00000004;
          }
          result.bitField0_ |= to_bitField0_;
        }

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey.getDefaultInstance()) return this;
          if (other.hasId()) {
            setId(other.getId());
          }
          if (other.hasExpiryDate()) {
            setExpiryDate(other.getExpiryDate());
          }
          if (other.hasKey()) {
            setKey(other.getKey());
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }

        @java.lang.Override
        public final boolean isInitialized() {
          return true;
        }

        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 8: {
                  id_ = input.readUInt32();
                  bitField0_ |= 0x00000001;
                  break;
                } // case 8
                case 16: {
                  expiryDate_ = input.readUInt64();
                  bitField0_ |= 0x00000002;
                  break;
                } // case 16
                case 26: {
                  key_ = input.readBytes();
                  bitField0_ |= 0x00000004;
                  break;
                } // case 26
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
        private int bitField0_;

        private int id_ ;
        /**
         * <code>optional uint32 id = 1;</code>
         * @return Whether the id field is set.
         */
        @java.lang.Override
        public boolean hasId() {
          return ((bitField0_ & 0x00000001) != 0);
        }
        /**
         * <code>optional uint32 id = 1;</code>
         * @return The id.
         */
        @java.lang.Override
        public int getId() {
          return id_;
        }
        /**
         * <code>optional uint32 id = 1;</code>
         * @param value The id to set.
         * @return This builder for chaining.
         */
        public Builder setId(int value) {

          id_ = value;
          bitField0_ |= 0x00000001;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint32 id = 1;</code>
         * @return This builder for chaining.
         */
        public Builder clearId() {
          bitField0_ = (bitField0_ & ~0x00000001);
          id_ = 0;
          onChanged();
          return this;
        }

        private long expiryDate_ ;
        /**
         * <code>optional uint64 expiryDate = 2;</code>
         * @return Whether the expiryDate field is set.
         */
        @java.lang.Override
        public boolean hasExpiryDate() {
          return ((bitField0_ & 0x00000002) != 0);
        }
        /**
         * <code>optional uint64 expiryDate = 2;</code>
         * @return The expiryDate.
         */
        @java.lang.Override
        public long getExpiryDate() {
          return expiryDate_;
        }
        /**
         * <code>optional uint64 expiryDate = 2;</code>
         * @param value The expiryDate to set.
         * @return This builder for chaining.
         */
        public Builder setExpiryDate(long value) {

          expiryDate_ = value;
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 expiryDate = 2;</code>
         * @return This builder for chaining.
         */
        public Builder clearExpiryDate() {
          bitField0_ = (bitField0_ & ~0x00000002);
          expiryDate_ = 0L;
          onChanged();
          return this;
        }

        private org.apache.hadoop.thirdparty.protobuf.ByteString key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        /**
         * <code>optional bytes key = 3;</code>
         * @return Whether the key field is set.
         */
        @java.lang.Override
        public boolean hasKey() {
          return ((bitField0_ & 0x00000004) != 0);
        }
        /**
         * <code>optional bytes key = 3;</code>
         * @return The key.
         */
        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.ByteString getKey() {
          return key_;
        }
        /**
         * <code>optional bytes key = 3;</code>
         * @param value The key to set.
         * @return This builder for chaining.
         */
        public Builder setKey(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
          if (value == null) { throw new NullPointerException(); }
          key_ = value;
          bitField0_ |= 0x00000004;
          onChanged();
          return this;
        }
        /**
         * <code>optional bytes key = 3;</code>
         * @return This builder for chaining.
         */
        public Builder clearKey() {
          bitField0_ = (bitField0_ & ~0x00000004);
          key_ = getDefaultInstance().getKey();
          onChanged();
          return this;
        }
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SecretManagerSection.DelegationKey)
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SecretManagerSection.DelegationKey)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DelegationKey>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DelegationKey>() {
        @java.lang.Override
        public DelegationKey parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<DelegationKey> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<DelegationKey> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.DelegationKey getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }
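
    /**
     * Illustrative sketch only (not emitted by protoc): constructs a
     * DelegationKey via its builder. The id, expiry, and key bytes are
     * hypothetical placeholder values.
     */
    private static DelegationKey exampleDelegationKey() {
      return DelegationKey.newBuilder()
          .setId(7)                                   // hypothetical key id
          .setExpiryDate(System.currentTimeMillis())  // expiry as epoch millis
          .setKey(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(
              new byte[] {0x01, 0x02, 0x03}))         // hypothetical key material
          .build();
    }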

    public interface PersistTokenOrBuilder extends
        // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.SecretManagerSection.PersistToken)
        org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

      /**
       * <code>optional uint32 version = 1;</code>
       * @return Whether the version field is set.
       */
      boolean hasVersion();
      /**
       * <code>optional uint32 version = 1;</code>
       * @return The version.
       */
      int getVersion();

      /**
       * <code>optional string owner = 2;</code>
       * @return Whether the owner field is set.
       */
      boolean hasOwner();
      /**
       * <code>optional string owner = 2;</code>
       * @return The owner.
       */
      java.lang.String getOwner();
      /**
       * <code>optional string owner = 2;</code>
       * @return The bytes for owner.
       */
      org.apache.hadoop.thirdparty.protobuf.ByteString
          getOwnerBytes();

      /**
       * <code>optional string renewer = 3;</code>
       * @return Whether the renewer field is set.
       */
      boolean hasRenewer();
      /**
       * <code>optional string renewer = 3;</code>
       * @return The renewer.
       */
      java.lang.String getRenewer();
      /**
       * <code>optional string renewer = 3;</code>
       * @return The bytes for renewer.
       */
      org.apache.hadoop.thirdparty.protobuf.ByteString
          getRenewerBytes();

      /**
       * <code>optional string realUser = 4;</code>
       * @return Whether the realUser field is set.
       */
      boolean hasRealUser();
      /**
       * <code>optional string realUser = 4;</code>
       * @return The realUser.
       */
      java.lang.String getRealUser();
      /**
       * <code>optional string realUser = 4;</code>
       * @return The bytes for realUser.
       */
      org.apache.hadoop.thirdparty.protobuf.ByteString
          getRealUserBytes();

      /**
       * <code>optional uint64 issueDate = 5;</code>
       * @return Whether the issueDate field is set.
       */
      boolean hasIssueDate();
      /**
       * <code>optional uint64 issueDate = 5;</code>
       * @return The issueDate.
       */
      long getIssueDate();

      /**
       * <code>optional uint64 maxDate = 6;</code>
       * @return Whether the maxDate field is set.
       */
      boolean hasMaxDate();
      /**
       * <code>optional uint64 maxDate = 6;</code>
       * @return The maxDate.
       */
      long getMaxDate();

      /**
       * <code>optional uint32 sequenceNumber = 7;</code>
       * @return Whether the sequenceNumber field is set.
       */
      boolean hasSequenceNumber();
      /**
       * <code>optional uint32 sequenceNumber = 7;</code>
       * @return The sequenceNumber.
       */
      int getSequenceNumber();

      /**
       * <code>optional uint32 masterKeyId = 8;</code>
       * @return Whether the masterKeyId field is set.
       */
      boolean hasMasterKeyId();
      /**
       * <code>optional uint32 masterKeyId = 8;</code>
       * @return The masterKeyId.
       */
      int getMasterKeyId();

      /**
       * <code>optional uint64 expiryDate = 9;</code>
       * @return Whether the expiryDate field is set.
       */
      boolean hasExpiryDate();
      /**
       * <code>optional uint64 expiryDate = 9;</code>
       * @return The expiryDate.
       */
      long getExpiryDate();
    }
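
    /**
     * Illustrative sketch only (not emitted by protoc): fills in a
     * PersistToken using the standard generated builder setters for the
     * fields declared in the interface above. All values are hypothetical
     * placeholders.
     */
    private static PersistToken examplePersistToken() {
      long now = System.currentTimeMillis();
      return PersistToken.newBuilder()
          .setVersion(1)
          .setOwner("hdfs")              // hypothetical token owner
          .setRenewer("yarn")            // hypothetical renewer
          .setRealUser("")               // empty real user
          .setIssueDate(now)
          .setMaxDate(now + 86_400_000L)
          .setSequenceNumber(1)
          .setMasterKeyId(7)
          .setExpiryDate(now + 3_600_000L)
          .build();
    }
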
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.SecretManagerSection.PersistToken}
     */
    public static final class PersistToken extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
        // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.SecretManagerSection.PersistToken)
        PersistTokenOrBuilder {
    private static final long serialVersionUID = 0L;
      // Use PersistToken.newBuilder() to construct.
      private PersistToken(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
        super(builder);
      }
      private PersistToken() {
        owner_ = "";
        renewer_ = "";
        realUser_ = "";
      }

      @java.lang.Override
      @SuppressWarnings({"unused"})
      protected java.lang.Object newInstance(
          UnusedPrivateParameter unused) {
        return new PersistToken();
      }

      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken.Builder.class);
      }

      private int bitField0_;
      public static final int VERSION_FIELD_NUMBER = 1;
      private int version_ = 0;
      /**
       * <code>optional uint32 version = 1;</code>
       * @return Whether the version field is set.
       */
      @java.lang.Override
      public boolean hasVersion() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional uint32 version = 1;</code>
       * @return The version.
       */
      @java.lang.Override
      public int getVersion() {
        return version_;
      }

      public static final int OWNER_FIELD_NUMBER = 2;
      @SuppressWarnings("serial")
      private volatile java.lang.Object owner_ = "";
      /**
       * <code>optional string owner = 2;</code>
       * @return Whether the owner field is set.
       */
      @java.lang.Override
      public boolean hasOwner() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional string owner = 2;</code>
       * @return The owner.
       */
      @java.lang.Override
      public java.lang.String getOwner() {
        java.lang.Object ref = owner_;
        if (ref instanceof java.lang.String) {
          return (java.lang.String) ref;
        } else {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            owner_ = s;
          }
          return s;
        }
      }
      /**
       * <code>optional string owner = 2;</code>
       * @return The bytes for owner.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getOwnerBytes() {
        java.lang.Object ref = owner_;
        if (ref instanceof java.lang.String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          owner_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
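
      // Note on the pattern above: owner_ holds either a String or a
      // ByteString. getOwner() lazily converts a ByteString to a String and
      // caches it only when the bytes are valid UTF-8; getOwnerBytes() does
      // the reverse conversion and caches the ByteString. The same lazy
      // caching applies to renewer_ and realUser_ below.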

      public static final int RENEWER_FIELD_NUMBER = 3;
      @SuppressWarnings("serial")
      private volatile java.lang.Object renewer_ = "";
      /**
       * <code>optional string renewer = 3;</code>
       * @return Whether the renewer field is set.
       */
      @java.lang.Override
      public boolean hasRenewer() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional string renewer = 3;</code>
       * @return The renewer.
       */
      @java.lang.Override
      public java.lang.String getRenewer() {
        java.lang.Object ref = renewer_;
        if (ref instanceof java.lang.String) {
          return (java.lang.String) ref;
        } else {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            renewer_ = s;
          }
          return s;
        }
      }
      /**
       * <code>optional string renewer = 3;</code>
       * @return The bytes for renewer.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getRenewerBytes() {
        java.lang.Object ref = renewer_;
        if (ref instanceof java.lang.String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          renewer_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }

      public static final int REALUSER_FIELD_NUMBER = 4;
      @SuppressWarnings("serial")
      private volatile java.lang.Object realUser_ = "";
      /**
       * <code>optional string realUser = 4;</code>
       * @return Whether the realUser field is set.
       */
      @java.lang.Override
      public boolean hasRealUser() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional string realUser = 4;</code>
       * @return The realUser.
       */
      @java.lang.Override
      public java.lang.String getRealUser() {
        java.lang.Object ref = realUser_;
        if (ref instanceof java.lang.String) {
          return (java.lang.String) ref;
        } else {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            realUser_ = s;
          }
          return s;
        }
      }
      /**
       * <code>optional string realUser = 4;</code>
       * @return The bytes for realUser.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getRealUserBytes() {
        java.lang.Object ref = realUser_;
        if (ref instanceof java.lang.String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          realUser_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }

      public static final int ISSUEDATE_FIELD_NUMBER = 5;
      private long issueDate_ = 0L;
      /**
       * <code>optional uint64 issueDate = 5;</code>
       * @return Whether the issueDate field is set.
       */
      @java.lang.Override
      public boolean hasIssueDate() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional uint64 issueDate = 5;</code>
       * @return The issueDate.
       */
      @java.lang.Override
      public long getIssueDate() {
        return issueDate_;
      }

      public static final int MAXDATE_FIELD_NUMBER = 6;
      private long maxDate_ = 0L;
      /**
       * <code>optional uint64 maxDate = 6;</code>
       * @return Whether the maxDate field is set.
       */
      @java.lang.Override
      public boolean hasMaxDate() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>optional uint64 maxDate = 6;</code>
       * @return The maxDate.
       */
      @java.lang.Override
      public long getMaxDate() {
        return maxDate_;
      }

      public static final int SEQUENCENUMBER_FIELD_NUMBER = 7;
      private int sequenceNumber_ = 0;
      /**
       * <code>optional uint32 sequenceNumber = 7;</code>
       * @return Whether the sequenceNumber field is set.
       */
      @java.lang.Override
      public boolean hasSequenceNumber() {
        return ((bitField0_ & 0x00000040) != 0);
      }
      /**
       * <code>optional uint32 sequenceNumber = 7;</code>
       * @return The sequenceNumber.
       */
      @java.lang.Override
      public int getSequenceNumber() {
        return sequenceNumber_;
      }

      public static final int MASTERKEYID_FIELD_NUMBER = 8;
      private int masterKeyId_ = 0;
      /**
       * <code>optional uint32 masterKeyId = 8;</code>
       * @return Whether the masterKeyId field is set.
       */
      @java.lang.Override
      public boolean hasMasterKeyId() {
        return ((bitField0_ & 0x00000080) != 0);
      }
      /**
       * <code>optional uint32 masterKeyId = 8;</code>
       * @return The masterKeyId.
       */
      @java.lang.Override
      public int getMasterKeyId() {
        return masterKeyId_;
      }

      public static final int EXPIRYDATE_FIELD_NUMBER = 9;
      private long expiryDate_ = 0L;
      /**
       * <code>optional uint64 expiryDate = 9;</code>
       * @return Whether the expiryDate field is set.
       */
      @java.lang.Override
      public boolean hasExpiryDate() {
        return ((bitField0_ & 0x00000100) != 0);
      }
      /**
       * <code>optional uint64 expiryDate = 9;</code>
       * @return The expiryDate.
       */
      @java.lang.Override
      public long getExpiryDate() {
        return expiryDate_;
      }

      private byte memoizedIsInitialized = -1;
      @java.lang.Override
      public final boolean isInitialized() {
        byte isInitialized = memoizedIsInitialized;
        if (isInitialized == 1) return true;
        if (isInitialized == 0) return false;

        memoizedIsInitialized = 1;
        return true;
      }

      @java.lang.Override
      public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        if (((bitField0_ & 0x00000001) != 0)) {
          output.writeUInt32(1, version_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, owner_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, renewer_);
        }
        if (((bitField0_ & 0x00000008) != 0)) {
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 4, realUser_);
        }
        if (((bitField0_ & 0x00000010) != 0)) {
          output.writeUInt64(5, issueDate_);
        }
        if (((bitField0_ & 0x00000020) != 0)) {
          output.writeUInt64(6, maxDate_);
        }
        if (((bitField0_ & 0x00000040) != 0)) {
          output.writeUInt32(7, sequenceNumber_);
        }
        if (((bitField0_ & 0x00000080) != 0)) {
          output.writeUInt32(8, masterKeyId_);
        }
        if (((bitField0_ & 0x00000100) != 0)) {
          output.writeUInt64(9, expiryDate_);
        }
        getUnknownFields().writeTo(output);
      }

      @java.lang.Override
      public int getSerializedSize() {
        int size = memoizedSize;
        if (size != -1) return size;

        size = 0;
        if (((bitField0_ & 0x00000001) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt32Size(1, version_);
        }
        if (((bitField0_ & 0x00000002) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, owner_);
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, renewer_);
        }
        if (((bitField0_ & 0x00000008) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(4, realUser_);
        }
        if (((bitField0_ & 0x00000010) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64Size(5, issueDate_);
        }
        if (((bitField0_ & 0x00000020) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64Size(6, maxDate_);
        }
        if (((bitField0_ & 0x00000040) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt32Size(7, sequenceNumber_);
        }
        if (((bitField0_ & 0x00000080) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt32Size(8, masterKeyId_);
        }
        if (((bitField0_ & 0x00000100) != 0)) {
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt64Size(9, expiryDate_);
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSize = size;
        return size;
      }

      @java.lang.Override
      public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
         return true;
        }
        if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken)) {
          return super.equals(obj);
        }
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken) obj;

        if (hasVersion() != other.hasVersion()) return false;
        if (hasVersion()) {
          if (getVersion()
              != other.getVersion()) return false;
        }
        if (hasOwner() != other.hasOwner()) return false;
        if (hasOwner()) {
          if (!getOwner()
              .equals(other.getOwner())) return false;
        }
        if (hasRenewer() != other.hasRenewer()) return false;
        if (hasRenewer()) {
          if (!getRenewer()
              .equals(other.getRenewer())) return false;
        }
        if (hasRealUser() != other.hasRealUser()) return false;
        if (hasRealUser()) {
          if (!getRealUser()
              .equals(other.getRealUser())) return false;
        }
        if (hasIssueDate() != other.hasIssueDate()) return false;
        if (hasIssueDate()) {
          if (getIssueDate()
              != other.getIssueDate()) return false;
        }
        if (hasMaxDate() != other.hasMaxDate()) return false;
        if (hasMaxDate()) {
          if (getMaxDate()
              != other.getMaxDate()) return false;
        }
        if (hasSequenceNumber() != other.hasSequenceNumber()) return false;
        if (hasSequenceNumber()) {
          if (getSequenceNumber()
              != other.getSequenceNumber()) return false;
        }
        if (hasMasterKeyId() != other.hasMasterKeyId()) return false;
        if (hasMasterKeyId()) {
          if (getMasterKeyId()
              != other.getMasterKeyId()) return false;
        }
        if (hasExpiryDate() != other.hasExpiryDate()) return false;
        if (hasExpiryDate()) {
          if (getExpiryDate()
              != other.getExpiryDate()) return false;
        }
        if (!getUnknownFields().equals(other.getUnknownFields())) return false;
        return true;
      }

      @java.lang.Override
      public int hashCode() {
        if (memoizedHashCode != 0) {
          return memoizedHashCode;
        }
        int hash = 41;
        hash = (19 * hash) + getDescriptor().hashCode();
        if (hasVersion()) {
          hash = (37 * hash) + VERSION_FIELD_NUMBER;
          hash = (53 * hash) + getVersion();
        }
        if (hasOwner()) {
          hash = (37 * hash) + OWNER_FIELD_NUMBER;
          hash = (53 * hash) + getOwner().hashCode();
        }
        if (hasRenewer()) {
          hash = (37 * hash) + RENEWER_FIELD_NUMBER;
          hash = (53 * hash) + getRenewer().hashCode();
        }
        if (hasRealUser()) {
          hash = (37 * hash) + REALUSER_FIELD_NUMBER;
          hash = (53 * hash) + getRealUser().hashCode();
        }
        if (hasIssueDate()) {
          hash = (37 * hash) + ISSUEDATE_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getIssueDate());
        }
        if (hasMaxDate()) {
          hash = (37 * hash) + MAXDATE_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getMaxDate());
        }
        if (hasSequenceNumber()) {
          hash = (37 * hash) + SEQUENCENUMBER_FIELD_NUMBER;
          hash = (53 * hash) + getSequenceNumber();
        }
        if (hasMasterKeyId()) {
          hash = (37 * hash) + MASTERKEYID_FIELD_NUMBER;
          hash = (53 * hash) + getMasterKeyId();
        }
        if (hasExpiryDate()) {
          hash = (37 * hash) + EXPIRYDATE_FIELD_NUMBER;
          hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
              getExpiryDate());
        }
        hash = (29 * hash) + getUnknownFields().hashCode();
        memoizedHashCode = hash;
        return hash;
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom(
          java.nio.ByteBuffer data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom(
          java.nio.ByteBuffer data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom(
          org.apache.hadoop.thirdparty.protobuf.ByteString data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom(byte[] data)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom(
          byte[] data,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        return PARSER.parseFrom(data, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input);
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseDelimitedFrom(
          java.io.InputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input);
      }
      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken parseFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
            .parseWithIOException(PARSER, input, extensionRegistry);
      }

      @java.lang.Override
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder() {
        return DEFAULT_INSTANCE.toBuilder();
      }
      public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken prototype) {
        return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
      }
      @java.lang.Override
      public Builder toBuilder() {
        return this == DEFAULT_INSTANCE
            ? new Builder() : new Builder().mergeFrom(this);
      }

      @java.lang.Override
      protected Builder newBuilderForType(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        Builder builder = new Builder(parent);
        return builder;
      }
      /**
       * Protobuf type {@code hadoop.hdfs.fsimage.SecretManagerSection.PersistToken}
       */
      public static final class Builder extends
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
          // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.SecretManagerSection.PersistToken)
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistTokenOrBuilder {
        public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptor() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_descriptor;
        }

        @java.lang.Override
        protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
            internalGetFieldAccessorTable() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_fieldAccessorTable
              .ensureFieldAccessorsInitialized(
                  org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken.Builder.class);
        }

        // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken.newBuilder()
        private Builder() {

        }

        private Builder(
            org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
          super(parent);

        }
        @java.lang.Override
        public Builder clear() {
          super.clear();
          bitField0_ = 0;
          version_ = 0;
          owner_ = "";
          renewer_ = "";
          realUser_ = "";
          issueDate_ = 0L;
          maxDate_ = 0L;
          sequenceNumber_ = 0;
          masterKeyId_ = 0;
          expiryDate_ = 0L;
          return this;
        }

        @java.lang.Override
        public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_descriptor;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken getDefaultInstanceForType() {
          return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken.getDefaultInstance();
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken build() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken result = buildPartial();
          if (!result.isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return result;
        }

        @java.lang.Override
        public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken buildPartial() {
          org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken(this);
          if (bitField0_ != 0) { buildPartial0(result); }
          onBuilt();
          return result;
        }

        private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken result) {
          int from_bitField0_ = bitField0_;
          int to_bitField0_ = 0;
          if (((from_bitField0_ & 0x00000001) != 0)) {
            result.version_ = version_;
            to_bitField0_ |= 0x00000001;
          }
          if (((from_bitField0_ & 0x00000002) != 0)) {
            result.owner_ = owner_;
            to_bitField0_ |= 0x00000002;
          }
          if (((from_bitField0_ & 0x00000004) != 0)) {
            result.renewer_ = renewer_;
            to_bitField0_ |= 0x00000004;
          }
          if (((from_bitField0_ & 0x00000008) != 0)) {
            result.realUser_ = realUser_;
            to_bitField0_ |= 0x00000008;
          }
          if (((from_bitField0_ & 0x00000010) != 0)) {
            result.issueDate_ = issueDate_;
            to_bitField0_ |= 0x00000010;
          }
          if (((from_bitField0_ & 0x00000020) != 0)) {
            result.maxDate_ = maxDate_;
            to_bitField0_ |= 0x00000020;
          }
          if (((from_bitField0_ & 0x00000040) != 0)) {
            result.sequenceNumber_ = sequenceNumber_;
            to_bitField0_ |= 0x00000040;
          }
          if (((from_bitField0_ & 0x00000080) != 0)) {
            result.masterKeyId_ = masterKeyId_;
            to_bitField0_ |= 0x00000080;
          }
          if (((from_bitField0_ & 0x00000100) != 0)) {
            result.expiryDate_ = expiryDate_;
            to_bitField0_ |= 0x00000100;
          }
          result.bitField0_ |= to_bitField0_;
        }

        @java.lang.Override
        public Builder clone() {
          return super.clone();
        }
        @java.lang.Override
        public Builder setField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.setField(field, value);
        }
        @java.lang.Override
        public Builder clearField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
          return super.clearField(field);
        }
        @java.lang.Override
        public Builder clearOneof(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
          return super.clearOneof(oneof);
        }
        @java.lang.Override
        public Builder setRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            int index, java.lang.Object value) {
          return super.setRepeatedField(field, index, value);
        }
        @java.lang.Override
        public Builder addRepeatedField(
            org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
            java.lang.Object value) {
          return super.addRepeatedField(field, value);
        }
        @java.lang.Override
        public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken) {
            return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken other) {
          if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken.getDefaultInstance()) return this;
          if (other.hasVersion()) {
            setVersion(other.getVersion());
          }
          if (other.hasOwner()) {
            owner_ = other.owner_;
            bitField0_ |= 0x00000002;
            onChanged();
          }
          if (other.hasRenewer()) {
            renewer_ = other.renewer_;
            bitField0_ |= 0x00000004;
            onChanged();
          }
          if (other.hasRealUser()) {
            realUser_ = other.realUser_;
            bitField0_ |= 0x00000008;
            onChanged();
          }
          if (other.hasIssueDate()) {
            setIssueDate(other.getIssueDate());
          }
          if (other.hasMaxDate()) {
            setMaxDate(other.getMaxDate());
          }
          if (other.hasSequenceNumber()) {
            setSequenceNumber(other.getSequenceNumber());
          }
          if (other.hasMasterKeyId()) {
            setMasterKeyId(other.getMasterKeyId());
          }
          if (other.hasExpiryDate()) {
            setExpiryDate(other.getExpiryDate());
          }
          this.mergeUnknownFields(other.getUnknownFields());
          onChanged();
          return this;
        }

        @java.lang.Override
        public final boolean isInitialized() {
          return true;
        }

        @java.lang.Override
        public Builder mergeFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          if (extensionRegistry == null) {
            throw new java.lang.NullPointerException();
          }
          try {
            boolean done = false;
            while (!done) {
              int tag = input.readTag();
              switch (tag) {
                case 0:
                  done = true;
                  break;
                case 8: {
                  version_ = input.readUInt32();
                  bitField0_ |= 0x00000001;
                  break;
                } // case 8
                case 18: {
                  owner_ = input.readBytes();
                  bitField0_ |= 0x00000002;
                  break;
                } // case 18
                case 26: {
                  renewer_ = input.readBytes();
                  bitField0_ |= 0x00000004;
                  break;
                } // case 26
                case 34: {
                  realUser_ = input.readBytes();
                  bitField0_ |= 0x00000008;
                  break;
                } // case 34
                case 40: {
                  issueDate_ = input.readUInt64();
                  bitField0_ |= 0x00000010;
                  break;
                } // case 40
                case 48: {
                  maxDate_ = input.readUInt64();
                  bitField0_ |= 0x00000020;
                  break;
                } // case 48
                case 56: {
                  sequenceNumber_ = input.readUInt32();
                  bitField0_ |= 0x00000040;
                  break;
                } // case 56
                case 64: {
                  masterKeyId_ = input.readUInt32();
                  bitField0_ |= 0x00000080;
                  break;
                } // case 64
                case 72: {
                  expiryDate_ = input.readUInt64();
                  bitField0_ |= 0x00000100;
                  break;
                } // case 72
                default: {
                  if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                    done = true; // was an endgroup tag
                  }
                  break;
                } // default:
              } // switch (tag)
            } // while (!done)
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.unwrapIOException();
          } finally {
            onChanged();
          } // finally
          return this;
        }
        private int bitField0_;

        private int version_ ;
        /**
         * <code>optional uint32 version = 1;</code>
         * @return Whether the version field is set.
         */
        @java.lang.Override
        public boolean hasVersion() {
          return ((bitField0_ & 0x00000001) != 0);
        }
        /**
         * <code>optional uint32 version = 1;</code>
         * @return The version.
         */
        @java.lang.Override
        public int getVersion() {
          return version_;
        }
        /**
         * <code>optional uint32 version = 1;</code>
         * @param value The version to set.
         * @return This builder for chaining.
         */
        public Builder setVersion(int value) {

          version_ = value;
          bitField0_ |= 0x00000001;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint32 version = 1;</code>
         * @return This builder for chaining.
         */
        public Builder clearVersion() {
          bitField0_ = (bitField0_ & ~0x00000001);
          version_ = 0;
          onChanged();
          return this;
        }

        private java.lang.Object owner_ = "";
        /**
         * <code>optional string owner = 2;</code>
         * @return Whether the owner field is set.
         */
        public boolean hasOwner() {
          return ((bitField0_ & 0x00000002) != 0);
        }
        /**
         * <code>optional string owner = 2;</code>
         * @return The owner.
         */
        public java.lang.String getOwner() {
          java.lang.Object ref = owner_;
          if (!(ref instanceof java.lang.String)) {
            org.apache.hadoop.thirdparty.protobuf.ByteString bs =
                (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
            java.lang.String s = bs.toStringUtf8();
            if (bs.isValidUtf8()) {
              owner_ = s;
            }
            return s;
          } else {
            return (java.lang.String) ref;
          }
        }
        /**
         * <code>optional string owner = 2;</code>
         * @return The bytes for owner.
         */
        public org.apache.hadoop.thirdparty.protobuf.ByteString
            getOwnerBytes() {
          java.lang.Object ref = owner_;
          if (ref instanceof String) {
            org.apache.hadoop.thirdparty.protobuf.ByteString b = 
                org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                    (java.lang.String) ref);
            owner_ = b;
            return b;
          } else {
            return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          }
        }
        /**
         * <code>optional string owner = 2;</code>
         * @param value The owner to set.
         * @return This builder for chaining.
         */
        public Builder setOwner(
            java.lang.String value) {
          if (value == null) { throw new NullPointerException(); }
          owner_ = value;
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }
        /**
         * <code>optional string owner = 2;</code>
         * @return This builder for chaining.
         */
        public Builder clearOwner() {
          owner_ = getDefaultInstance().getOwner();
          bitField0_ = (bitField0_ & ~0x00000002);
          onChanged();
          return this;
        }
        /**
         * <code>optional string owner = 2;</code>
         * @param value The bytes for owner to set.
         * @return This builder for chaining.
         */
        public Builder setOwnerBytes(
            org.apache.hadoop.thirdparty.protobuf.ByteString value) {
          if (value == null) { throw new NullPointerException(); }
          owner_ = value;
          bitField0_ |= 0x00000002;
          onChanged();
          return this;
        }

        private java.lang.Object renewer_ = "";
        /**
         * <code>optional string renewer = 3;</code>
         * @return Whether the renewer field is set.
         */
        public boolean hasRenewer() {
          return ((bitField0_ & 0x00000004) != 0);
        }
        /**
         * <code>optional string renewer = 3;</code>
         * @return The renewer.
         */
        public java.lang.String getRenewer() {
          java.lang.Object ref = renewer_;
          if (!(ref instanceof java.lang.String)) {
            org.apache.hadoop.thirdparty.protobuf.ByteString bs =
                (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
            java.lang.String s = bs.toStringUtf8();
            if (bs.isValidUtf8()) {
              renewer_ = s;
            }
            return s;
          } else {
            return (java.lang.String) ref;
          }
        }
        /**
         * <code>optional string renewer = 3;</code>
         * @return The bytes for renewer.
         */
        public org.apache.hadoop.thirdparty.protobuf.ByteString
            getRenewerBytes() {
          java.lang.Object ref = renewer_;
          if (ref instanceof String) {
            org.apache.hadoop.thirdparty.protobuf.ByteString b = 
                org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                    (java.lang.String) ref);
            renewer_ = b;
            return b;
          } else {
            return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          }
        }
        /**
         * <code>optional string renewer = 3;</code>
         * @param value The renewer to set.
         * @return This builder for chaining.
         */
        public Builder setRenewer(
            java.lang.String value) {
          if (value == null) { throw new NullPointerException(); }
          renewer_ = value;
          bitField0_ |= 0x00000004;
          onChanged();
          return this;
        }
        /**
         * <code>optional string renewer = 3;</code>
         * @return This builder for chaining.
         */
        public Builder clearRenewer() {
          renewer_ = getDefaultInstance().getRenewer();
          bitField0_ = (bitField0_ & ~0x00000004);
          onChanged();
          return this;
        }
        /**
         * <code>optional string renewer = 3;</code>
         * @param value The bytes for renewer to set.
         * @return This builder for chaining.
         */
        public Builder setRenewerBytes(
            org.apache.hadoop.thirdparty.protobuf.ByteString value) {
          if (value == null) { throw new NullPointerException(); }
          renewer_ = value;
          bitField0_ |= 0x00000004;
          onChanged();
          return this;
        }

        private java.lang.Object realUser_ = "";
        /**
         * <code>optional string realUser = 4;</code>
         * @return Whether the realUser field is set.
         */
        public boolean hasRealUser() {
          return ((bitField0_ & 0x00000008) != 0);
        }
        /**
         * <code>optional string realUser = 4;</code>
         * @return The realUser.
         */
        public java.lang.String getRealUser() {
          java.lang.Object ref = realUser_;
          if (!(ref instanceof java.lang.String)) {
            org.apache.hadoop.thirdparty.protobuf.ByteString bs =
                (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
            java.lang.String s = bs.toStringUtf8();
            if (bs.isValidUtf8()) {
              realUser_ = s;
            }
            return s;
          } else {
            return (java.lang.String) ref;
          }
        }
        /**
         * <code>optional string realUser = 4;</code>
         * @return The bytes for realUser.
         */
        public org.apache.hadoop.thirdparty.protobuf.ByteString
            getRealUserBytes() {
          java.lang.Object ref = realUser_;
          if (ref instanceof String) {
            org.apache.hadoop.thirdparty.protobuf.ByteString b = 
                org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                    (java.lang.String) ref);
            realUser_ = b;
            return b;
          } else {
            return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          }
        }
        /**
         * <code>optional string realUser = 4;</code>
         * @param value The realUser to set.
         * @return This builder for chaining.
         */
        public Builder setRealUser(
            java.lang.String value) {
          if (value == null) { throw new NullPointerException(); }
          realUser_ = value;
          bitField0_ |= 0x00000008;
          onChanged();
          return this;
        }
        /**
         * <code>optional string realUser = 4;</code>
         * @return This builder for chaining.
         */
        public Builder clearRealUser() {
          realUser_ = getDefaultInstance().getRealUser();
          bitField0_ = (bitField0_ & ~0x00000008);
          onChanged();
          return this;
        }
        /**
         * <code>optional string realUser = 4;</code>
         * @param value The bytes for realUser to set.
         * @return This builder for chaining.
         */
        public Builder setRealUserBytes(
            org.apache.hadoop.thirdparty.protobuf.ByteString value) {
          if (value == null) { throw new NullPointerException(); }
          realUser_ = value;
          bitField0_ |= 0x00000008;
          onChanged();
          return this;
        }

        private long issueDate_ ;
        /**
         * <code>optional uint64 issueDate = 5;</code>
         * @return Whether the issueDate field is set.
         */
        @java.lang.Override
        public boolean hasIssueDate() {
          return ((bitField0_ & 0x00000010) != 0);
        }
        /**
         * <code>optional uint64 issueDate = 5;</code>
         * @return The issueDate.
         */
        @java.lang.Override
        public long getIssueDate() {
          return issueDate_;
        }
        /**
         * <code>optional uint64 issueDate = 5;</code>
         * @param value The issueDate to set.
         * @return This builder for chaining.
         */
        public Builder setIssueDate(long value) {

          issueDate_ = value;
          bitField0_ |= 0x00000010;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 issueDate = 5;</code>
         * @return This builder for chaining.
         */
        public Builder clearIssueDate() {
          bitField0_ = (bitField0_ & ~0x00000010);
          issueDate_ = 0L;
          onChanged();
          return this;
        }

        private long maxDate_ ;
        /**
         * <code>optional uint64 maxDate = 6;</code>
         * @return Whether the maxDate field is set.
         */
        @java.lang.Override
        public boolean hasMaxDate() {
          return ((bitField0_ & 0x00000020) != 0);
        }
        /**
         * <code>optional uint64 maxDate = 6;</code>
         * @return The maxDate.
         */
        @java.lang.Override
        public long getMaxDate() {
          return maxDate_;
        }
        /**
         * <code>optional uint64 maxDate = 6;</code>
         * @param value The maxDate to set.
         * @return This builder for chaining.
         */
        public Builder setMaxDate(long value) {

          maxDate_ = value;
          bitField0_ |= 0x00000020;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 maxDate = 6;</code>
         * @return This builder for chaining.
         */
        public Builder clearMaxDate() {
          bitField0_ = (bitField0_ & ~0x00000020);
          maxDate_ = 0L;
          onChanged();
          return this;
        }

        private int sequenceNumber_ ;
        /**
         * <code>optional uint32 sequenceNumber = 7;</code>
         * @return Whether the sequenceNumber field is set.
         */
        @java.lang.Override
        public boolean hasSequenceNumber() {
          return ((bitField0_ & 0x00000040) != 0);
        }
        /**
         * <code>optional uint32 sequenceNumber = 7;</code>
         * @return The sequenceNumber.
         */
        @java.lang.Override
        public int getSequenceNumber() {
          return sequenceNumber_;
        }
        /**
         * <code>optional uint32 sequenceNumber = 7;</code>
         * @param value The sequenceNumber to set.
         * @return This builder for chaining.
         */
        public Builder setSequenceNumber(int value) {

          sequenceNumber_ = value;
          bitField0_ |= 0x00000040;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint32 sequenceNumber = 7;</code>
         * @return This builder for chaining.
         */
        public Builder clearSequenceNumber() {
          bitField0_ = (bitField0_ & ~0x00000040);
          sequenceNumber_ = 0;
          onChanged();
          return this;
        }

        private int masterKeyId_ ;
        /**
         * <code>optional uint32 masterKeyId = 8;</code>
         * @return Whether the masterKeyId field is set.
         */
        @java.lang.Override
        public boolean hasMasterKeyId() {
          return ((bitField0_ & 0x00000080) != 0);
        }
        /**
         * <code>optional uint32 masterKeyId = 8;</code>
         * @return The masterKeyId.
         */
        @java.lang.Override
        public int getMasterKeyId() {
          return masterKeyId_;
        }
        /**
         * <code>optional uint32 masterKeyId = 8;</code>
         * @param value The masterKeyId to set.
         * @return This builder for chaining.
         */
        public Builder setMasterKeyId(int value) {

          masterKeyId_ = value;
          bitField0_ |= 0x00000080;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint32 masterKeyId = 8;</code>
         * @return This builder for chaining.
         */
        public Builder clearMasterKeyId() {
          bitField0_ = (bitField0_ & ~0x00000080);
          masterKeyId_ = 0;
          onChanged();
          return this;
        }

        private long expiryDate_ ;
        /**
         * <code>optional uint64 expiryDate = 9;</code>
         * @return Whether the expiryDate field is set.
         */
        @java.lang.Override
        public boolean hasExpiryDate() {
          return ((bitField0_ & 0x00000100) != 0);
        }
        /**
         * <code>optional uint64 expiryDate = 9;</code>
         * @return The expiryDate.
         */
        @java.lang.Override
        public long getExpiryDate() {
          return expiryDate_;
        }
        /**
         * <code>optional uint64 expiryDate = 9;</code>
         * @param value The expiryDate to set.
         * @return This builder for chaining.
         */
        public Builder setExpiryDate(long value) {

          expiryDate_ = value;
          bitField0_ |= 0x00000100;
          onChanged();
          return this;
        }
        /**
         * <code>optional uint64 expiryDate = 9;</code>
         * @return This builder for chaining.
         */
        public Builder clearExpiryDate() {
          bitField0_ = (bitField0_ & ~0x00000100);
          expiryDate_ = 0L;
          onChanged();
          return this;
        }
        @java.lang.Override
        public final Builder setUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.setUnknownFields(unknownFields);
        }

        @java.lang.Override
        public final Builder mergeUnknownFields(
            final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
          return super.mergeUnknownFields(unknownFields);
        }


        // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SecretManagerSection.PersistToken)
      }

      // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SecretManagerSection.PersistToken)
      private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken DEFAULT_INSTANCE;
      static {
        DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken();
      }

      public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken getDefaultInstance() {
        return DEFAULT_INSTANCE;
      }

      @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<PersistToken>
          PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<PersistToken>() {
        @java.lang.Override
        public PersistToken parsePartialFrom(
            org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
            org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
            throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
          Builder builder = newBuilder();
          try {
            builder.mergeFrom(input, extensionRegistry);
          } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
            throw e.setUnfinishedMessage(builder.buildPartial());
          } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
            throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
          } catch (java.io.IOException e) {
            throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
                .setUnfinishedMessage(builder.buildPartial());
          }
          return builder.buildPartial();
        }
      };

      public static org.apache.hadoop.thirdparty.protobuf.Parser<PersistToken> parser() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Parser<PersistToken> getParserForType() {
        return PARSER;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.PersistToken getDefaultInstanceForType() {
        return DEFAULT_INSTANCE;
      }

    }

    private int bitField0_;
    public static final int CURRENTID_FIELD_NUMBER = 1;
    private int currentId_ = 0;
    /**
     * <code>optional uint32 currentId = 1;</code>
     * @return Whether the currentId field is set.
     */
    @java.lang.Override
    public boolean hasCurrentId() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional uint32 currentId = 1;</code>
     * @return The currentId.
     */
    @java.lang.Override
    public int getCurrentId() {
      return currentId_;
    }

    public static final int TOKENSEQUENCENUMBER_FIELD_NUMBER = 2;
    private int tokenSequenceNumber_ = 0;
    /**
     * <code>optional uint32 tokenSequenceNumber = 2;</code>
     * @return Whether the tokenSequenceNumber field is set.
     */
    @java.lang.Override
    public boolean hasTokenSequenceNumber() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional uint32 tokenSequenceNumber = 2;</code>
     * @return The tokenSequenceNumber.
     */
    @java.lang.Override
    public int getTokenSequenceNumber() {
      return tokenSequenceNumber_;
    }

    public static final int NUMKEYS_FIELD_NUMBER = 3;
    private int numKeys_ = 0;
    /**
     * <code>optional uint32 numKeys = 3;</code>
     * @return Whether the numKeys field is set.
     */
    @java.lang.Override
    public boolean hasNumKeys() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional uint32 numKeys = 3;</code>
     * @return The numKeys.
     */
    @java.lang.Override
    public int getNumKeys() {
      return numKeys_;
    }

    public static final int NUMTOKENS_FIELD_NUMBER = 4;
    private int numTokens_ = 0;
    /**
     * <pre>
     * repeated DelegationKey keys
     * repeated PersistToken tokens
     * </pre>
     *
     * <code>optional uint32 numTokens = 4;</code>
     * @return Whether the numTokens field is set.
     */
    @java.lang.Override
    public boolean hasNumTokens() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <pre>
     * repeated DelegationKey keys
     * repeated PersistToken tokens
     * </pre>
     *
     * <code>optional uint32 numTokens = 4;</code>
     * @return The numTokens.
     */
    @java.lang.Override
    public int getNumTokens() {
      return numTokens_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt32(1, currentId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt32(2, tokenSequenceNumber_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeUInt32(3, numKeys_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeUInt32(4, numTokens_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(1, currentId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(2, tokenSequenceNumber_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(3, numKeys_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(4, numTokens_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection) obj;

      if (hasCurrentId() != other.hasCurrentId()) return false;
      if (hasCurrentId()) {
        if (getCurrentId()
            != other.getCurrentId()) return false;
      }
      if (hasTokenSequenceNumber() != other.hasTokenSequenceNumber()) return false;
      if (hasTokenSequenceNumber()) {
        if (getTokenSequenceNumber()
            != other.getTokenSequenceNumber()) return false;
      }
      if (hasNumKeys() != other.hasNumKeys()) return false;
      if (hasNumKeys()) {
        if (getNumKeys()
            != other.getNumKeys()) return false;
      }
      if (hasNumTokens() != other.hasNumTokens()) return false;
      if (hasNumTokens()) {
        if (getNumTokens()
            != other.getNumTokens()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasCurrentId()) {
        hash = (37 * hash) + CURRENTID_FIELD_NUMBER;
        hash = (53 * hash) + getCurrentId();
      }
      if (hasTokenSequenceNumber()) {
        hash = (37 * hash) + TOKENSEQUENCENUMBER_FIELD_NUMBER;
        hash = (53 * hash) + getTokenSequenceNumber();
      }
      if (hasNumKeys()) {
        hash = (37 * hash) + NUMKEYS_FIELD_NUMBER;
        hash = (53 * hash) + getNumKeys();
      }
      if (hasNumTokens()) {
        hash = (37 * hash) + NUMTOKENS_FIELD_NUMBER;
        hash = (53 * hash) + getNumTokens();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.SecretManagerSection}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.SecretManagerSection)
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSectionOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        currentId_ = 0;
        tokenSequenceNumber_ = 0;
        numKeys_ = 0;
        numTokens_ = 0;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection build() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection buildPartial() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.currentId_ = currentId_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.tokenSequenceNumber_ = tokenSequenceNumber_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.numKeys_ = numKeys_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.numTokens_ = numTokens_;
          to_bitField0_ |= 0x00000008;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection) {
          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection other) {
        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection.getDefaultInstance()) return this;
        if (other.hasCurrentId()) {
          setCurrentId(other.getCurrentId());
        }
        if (other.hasTokenSequenceNumber()) {
          setTokenSequenceNumber(other.getTokenSequenceNumber());
        }
        if (other.hasNumKeys()) {
          setNumKeys(other.getNumKeys());
        }
        if (other.hasNumTokens()) {
          setNumTokens(other.getNumTokens());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                currentId_ = input.readUInt32();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 16: {
                tokenSequenceNumber_ = input.readUInt32();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                numKeys_ = input.readUInt32();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 32: {
                numTokens_ = input.readUInt32();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
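
      // Hand-added note, not part of the protoc output: each case value in the switch above is
      // a protobuf wire-format tag, computed as (field_number << 3) | wire_type. All four fields
      // are varint-encoded uint32 values (wire type 0), so currentId=1, tokenSequenceNumber=2,
      // numKeys=3 and numTokens=4 arrive as tags 8, 16, 24 and 32. The bitField0_ declared below
      // records, one bit per field, which optional fields have been explicitly set; the
      // has*/clear* accessors further down read and reset those same bits.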
      private int bitField0_;

      private int currentId_ ;
      /**
       * <code>optional uint32 currentId = 1;</code>
       * @return Whether the currentId field is set.
       */
      @java.lang.Override
      public boolean hasCurrentId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional uint32 currentId = 1;</code>
       * @return The currentId.
       */
      @java.lang.Override
      public int getCurrentId() {
        return currentId_;
      }
      /**
       * <code>optional uint32 currentId = 1;</code>
       * @param value The currentId to set.
       * @return This builder for chaining.
       */
      public Builder setCurrentId(int value) {

        currentId_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint32 currentId = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearCurrentId() {
        bitField0_ = (bitField0_ & ~0x00000001);
        currentId_ = 0;
        onChanged();
        return this;
      }

      private int tokenSequenceNumber_ ;
      /**
       * <code>optional uint32 tokenSequenceNumber = 2;</code>
       * @return Whether the tokenSequenceNumber field is set.
       */
      @java.lang.Override
      public boolean hasTokenSequenceNumber() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional uint32 tokenSequenceNumber = 2;</code>
       * @return The tokenSequenceNumber.
       */
      @java.lang.Override
      public int getTokenSequenceNumber() {
        return tokenSequenceNumber_;
      }
      /**
       * <code>optional uint32 tokenSequenceNumber = 2;</code>
       * @param value The tokenSequenceNumber to set.
       * @return This builder for chaining.
       */
      public Builder setTokenSequenceNumber(int value) {

        tokenSequenceNumber_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint32 tokenSequenceNumber = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearTokenSequenceNumber() {
        bitField0_ = (bitField0_ & ~0x00000002);
        tokenSequenceNumber_ = 0;
        onChanged();
        return this;
      }

      private int numKeys_ ;
      /**
       * <code>optional uint32 numKeys = 3;</code>
       * @return Whether the numKeys field is set.
       */
      @java.lang.Override
      public boolean hasNumKeys() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional uint32 numKeys = 3;</code>
       * @return The numKeys.
       */
      @java.lang.Override
      public int getNumKeys() {
        return numKeys_;
      }
      /**
       * <code>optional uint32 numKeys = 3;</code>
       * @param value The numKeys to set.
       * @return This builder for chaining.
       */
      public Builder setNumKeys(int value) {

        numKeys_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint32 numKeys = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearNumKeys() {
        bitField0_ = (bitField0_ & ~0x00000004);
        numKeys_ = 0;
        onChanged();
        return this;
      }

      private int numTokens_ ;
      /**
       * <pre>
       * repeated DelegationKey keys
       * repeated PersistToken tokens
       * </pre>
       *
       * <code>optional uint32 numTokens = 4;</code>
       * @return Whether the numTokens field is set.
       */
      @java.lang.Override
      public boolean hasNumTokens() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <pre>
       * repeated DelegationKey keys
       * repeated PersistToken tokens
       * </pre>
       *
       * <code>optional uint32 numTokens = 4;</code>
       * @return The numTokens.
       */
      @java.lang.Override
      public int getNumTokens() {
        return numTokens_;
      }
      /**
       * <pre>
       * repeated DelegationKey keys
       * repeated PersistToken tokens
       * </pre>
       *
       * <code>optional uint32 numTokens = 4;</code>
       * @param value The numTokens to set.
       * @return This builder for chaining.
       */
      public Builder setNumTokens(int value) {

        numTokens_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * repeated DelegationKey keys
       * repeated PersistToken tokens
       * </pre>
       *
       * <code>optional uint32 numTokens = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearNumTokens() {
        bitField0_ = (bitField0_ & ~0x00000008);
        numTokens_ = 0;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.SecretManagerSection)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.SecretManagerSection)
    private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection();
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<SecretManagerSection>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<SecretManagerSection>() {
      @java.lang.Override
      public SecretManagerSection parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };
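
    // Hand-added note: protoc marks the PARSER field above deprecated to steer callers toward
    // parser() / getParserForType() below; the field itself still backs both accessors.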

    public static org.apache.hadoop.thirdparty.protobuf.Parser<SecretManagerSection> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<SecretManagerSection> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
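
  // Hand-added illustration, not emitted by protoc: a minimal sketch of round-tripping a
  // SecretManagerSection through the builder and parser API generated above. The field values
  // are hypothetical; all four fields are optional, so any subset may be set.
  private static SecretManagerSection roundTripSecretManagerSectionExample()
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    SecretManagerSection section = SecretManagerSection.newBuilder()
        .setCurrentId(1)
        .setTokenSequenceNumber(42)
        .setNumKeys(2)
        .setNumTokens(3)
        .build();
    byte[] bytes = section.toByteArray();          // serialize to the protobuf wire format
    return SecretManagerSection.parseFrom(bytes);  // parse it back with the generated parser
  }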

  public interface CacheManagerSectionOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.CacheManagerSection)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required uint64 nextDirectiveId = 1;</code>
     * @return Whether the nextDirectiveId field is set.
     */
    boolean hasNextDirectiveId();
    /**
     * <code>required uint64 nextDirectiveId = 1;</code>
     * @return The nextDirectiveId.
     */
    long getNextDirectiveId();

    /**
     * <code>required uint32 numPools = 2;</code>
     * @return Whether the numPools field is set.
     */
    boolean hasNumPools();
    /**
     * <code>required uint32 numPools = 2;</code>
     * @return The numPools.
     */
    int getNumPools();

    /**
     * <pre>
     * repeated CachePoolInfoProto pools
     * repeated CacheDirectiveInfoProto directives
     * </pre>
     *
     * <code>required uint32 numDirectives = 3;</code>
     * @return Whether the numDirectives field is set.
     */
    boolean hasNumDirectives();
    /**
     * <pre>
     * repeated CachePoolInfoProto pools
     * repeated CacheDirectiveInfoProto directives
     * </pre>
     *
     * <code>required uint32 numDirectives = 3;</code>
     * @return The numDirectives.
     */
    int getNumDirectives();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.fsimage.CacheManagerSection}
   */
  public static final class CacheManagerSection extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.CacheManagerSection)
      CacheManagerSectionOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use CacheManagerSection.newBuilder() to construct.
    private CacheManagerSection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private CacheManagerSection() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new CacheManagerSection();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_CacheManagerSection_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_CacheManagerSection_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.Builder.class);
    }

    private int bitField0_;
    public static final int NEXTDIRECTIVEID_FIELD_NUMBER = 1;
    private long nextDirectiveId_ = 0L;
    /**
     * <code>required uint64 nextDirectiveId = 1;</code>
     * @return Whether the nextDirectiveId field is set.
     */
    @java.lang.Override
    public boolean hasNextDirectiveId() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required uint64 nextDirectiveId = 1;</code>
     * @return The nextDirectiveId.
     */
    @java.lang.Override
    public long getNextDirectiveId() {
      return nextDirectiveId_;
    }

    public static final int NUMPOOLS_FIELD_NUMBER = 2;
    private int numPools_ = 0;
    /**
     * <code>required uint32 numPools = 2;</code>
     * @return Whether the numPools field is set.
     */
    @java.lang.Override
    public boolean hasNumPools() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required uint32 numPools = 2;</code>
     * @return The numPools.
     */
    @java.lang.Override
    public int getNumPools() {
      return numPools_;
    }

    public static final int NUMDIRECTIVES_FIELD_NUMBER = 3;
    private int numDirectives_ = 0;
    /**
     * <pre>
     * repeated CachePoolInfoProto pools
     * repeated CacheDirectiveInfoProto directives
     * </pre>
     *
     * <code>required uint32 numDirectives = 3;</code>
     * @return Whether the numDirectives field is set.
     */
    @java.lang.Override
    public boolean hasNumDirectives() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <pre>
     * repeated CachePoolInfoProto pools
     * repeated CacheDirectiveInfoProto directives
     * </pre>
     *
     * <code>required uint32 numDirectives = 3;</code>
     * @return The numDirectives.
     */
    @java.lang.Override
    public int getNumDirectives() {
      return numDirectives_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasNextDirectiveId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasNumPools()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasNumDirectives()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt64(1, nextDirectiveId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt32(2, numPools_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeUInt32(3, numDirectives_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(1, nextDirectiveId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(2, numPools_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(3, numDirectives_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection) obj;

      if (hasNextDirectiveId() != other.hasNextDirectiveId()) return false;
      if (hasNextDirectiveId()) {
        if (getNextDirectiveId()
            != other.getNextDirectiveId()) return false;
      }
      if (hasNumPools() != other.hasNumPools()) return false;
      if (hasNumPools()) {
        if (getNumPools()
            != other.getNumPools()) return false;
      }
      if (hasNumDirectives() != other.hasNumDirectives()) return false;
      if (hasNumDirectives()) {
        if (getNumDirectives()
            != other.getNumDirectives()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasNextDirectiveId()) {
        hash = (37 * hash) + NEXTDIRECTIVEID_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getNextDirectiveId());
      }
      if (hasNumPools()) {
        hash = (37 * hash) + NUMPOOLS_FIELD_NUMBER;
        hash = (53 * hash) + getNumPools();
      }
      if (hasNumDirectives()) {
        hash = (37 * hash) + NUMDIRECTIVES_FIELD_NUMBER;
        hash = (53 * hash) + getNumDirectives();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.CacheManagerSection}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.CacheManagerSection)
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSectionOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_CacheManagerSection_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_CacheManagerSection_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        nextDirectiveId_ = 0L;
        numPools_ = 0;
        numDirectives_ = 0;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_CacheManagerSection_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection build() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection buildPartial() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.nextDirectiveId_ = nextDirectiveId_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.numPools_ = numPools_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.numDirectives_ = numDirectives_;
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection) {
          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection other) {
        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection.getDefaultInstance()) return this;
        if (other.hasNextDirectiveId()) {
          setNextDirectiveId(other.getNextDirectiveId());
        }
        if (other.hasNumPools()) {
          setNumPools(other.getNumPools());
        }
        if (other.hasNumDirectives()) {
          setNumDirectives(other.getNumDirectives());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasNextDirectiveId()) {
          return false;
        }
        if (!hasNumPools()) {
          return false;
        }
        if (!hasNumDirectives()) {
          return false;
        }
        return true;
      }
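
      // Hand-added note: unlike SecretManagerSection, all three fields here are declared
      // required in fsimage.proto, so isInitialized() only passes once nextDirectiveId,
      // numPools and numDirectives have all been set. build() above throws the exception from
      // newUninitializedMessageException(result) when any of them is missing, while
      // buildPartial() skips the check.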

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                nextDirectiveId_ = input.readUInt64();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 16: {
                numPools_ = input.readUInt32();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                numDirectives_ = input.readUInt32();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private long nextDirectiveId_ ;
      /**
       * <code>required uint64 nextDirectiveId = 1;</code>
       * @return Whether the nextDirectiveId field is set.
       */
      @java.lang.Override
      public boolean hasNextDirectiveId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required uint64 nextDirectiveId = 1;</code>
       * @return The nextDirectiveId.
       */
      @java.lang.Override
      public long getNextDirectiveId() {
        return nextDirectiveId_;
      }
      /**
       * <code>required uint64 nextDirectiveId = 1;</code>
       * @param value The nextDirectiveId to set.
       * @return This builder for chaining.
       */
      public Builder setNextDirectiveId(long value) {

        nextDirectiveId_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 nextDirectiveId = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearNextDirectiveId() {
        bitField0_ = (bitField0_ & ~0x00000001);
        nextDirectiveId_ = 0L;
        onChanged();
        return this;
      }

      private int numPools_ ;
      /**
       * <code>required uint32 numPools = 2;</code>
       * @return Whether the numPools field is set.
       */
      @java.lang.Override
      public boolean hasNumPools() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required uint32 numPools = 2;</code>
       * @return The numPools.
       */
      @java.lang.Override
      public int getNumPools() {
        return numPools_;
      }
      /**
       * <code>required uint32 numPools = 2;</code>
       * @param value The numPools to set.
       * @return This builder for chaining.
       */
      public Builder setNumPools(int value) {

        numPools_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 numPools = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearNumPools() {
        bitField0_ = (bitField0_ & ~0x00000002);
        numPools_ = 0;
        onChanged();
        return this;
      }

      private int numDirectives_ ;
      /**
       * <pre>
       * repeated CachePoolInfoProto pools
       * repeated CacheDirectiveInfoProto directives
       * </pre>
       *
       * <code>required uint32 numDirectives = 3;</code>
       * @return Whether the numDirectives field is set.
       */
      @java.lang.Override
      public boolean hasNumDirectives() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <pre>
       * repeated CachePoolInfoProto pools
       * repeated CacheDirectiveInfoProto directives
       * </pre>
       *
       * <code>required uint32 numDirectives = 3;</code>
       * @return The numDirectives.
       */
      @java.lang.Override
      public int getNumDirectives() {
        return numDirectives_;
      }
      /**
       * <pre>
       * repeated CachePoolInfoProto pools
       * repeated CacheDirectiveInfoProto directives
       * </pre>
       *
       * <code>required uint32 numDirectives = 3;</code>
       * @param value The numDirectives to set.
       * @return This builder for chaining.
       */
      public Builder setNumDirectives(int value) {

        numDirectives_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * repeated CachePoolInfoProto pools
       * repeated CacheDirectiveInfoProto directives
       * </pre>
       *
       * <code>required uint32 numDirectives = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearNumDirectives() {
        bitField0_ = (bitField0_ & ~0x00000004);
        numDirectives_ = 0;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.CacheManagerSection)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.CacheManagerSection)
    private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection();
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<CacheManagerSection>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<CacheManagerSection>() {
      @java.lang.Override
      public CacheManagerSection parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<CacheManagerSection> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<CacheManagerSection> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
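
  // Hand-added illustration, not emitted by protoc: a minimal sketch of the length-delimited
  // stream helpers generated above. All three fields are required, so they are set before
  // build(); the values themselves are hypothetical.
  private static CacheManagerSection cacheManagerSectionDelimitedExample()
      throws java.io.IOException {
    CacheManagerSection section = CacheManagerSection.newBuilder()
        .setNextDirectiveId(1L)
        .setNumPools(0)
        .setNumDirectives(0)
        .build();
    java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
    section.writeDelimitedTo(out);  // varint length prefix followed by the message bytes
    return CacheManagerSection.parseDelimitedFrom(
        new java.io.ByteArrayInputStream(out.toByteArray()));
  }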

  public interface ErasureCodingSectionOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.fsimage.ErasureCodingSection)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto> 
        getPoliciesList();
    /**
     * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getPolicies(int index);
    /**
     * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
     */
    int getPoliciesCount();
    /**
     * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> 
        getPoliciesOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getPoliciesOrBuilder(
        int index);
  }
  /**
   * Protobuf type {@code hadoop.hdfs.fsimage.ErasureCodingSection}
   */
  public static final class ErasureCodingSection extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.fsimage.ErasureCodingSection)
      ErasureCodingSectionOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use ErasureCodingSection.newBuilder() to construct.
    private ErasureCodingSection(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ErasureCodingSection() {
      policies_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ErasureCodingSection();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_ErasureCodingSection_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_ErasureCodingSection_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection.Builder.class);
    }

    public static final int POLICIES_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto> policies_;
    /**
     * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto> getPoliciesList() {
      return policies_;
    }
    /**
     * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> 
        getPoliciesOrBuilderList() {
      return policies_;
    }
    /**
     * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
     */
    @java.lang.Override
    public int getPoliciesCount() {
      return policies_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getPolicies(int index) {
      return policies_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getPoliciesOrBuilder(
        int index) {
      return policies_.get(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      for (int i = 0; i < getPoliciesCount(); i++) {
        if (!getPolicies(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }
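
    // Hand-added note: the loop above exists because policies is a repeated message field; the
    // section only counts as initialized when every contained hadoop.hdfs.ErasureCodingPolicyProto
    // element is itself initialized. protoc emits this per-element check only when the element
    // type can be uninitialized, i.e. when it (transitively) declares required fields.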

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      for (int i = 0; i < policies_.size(); i++) {
        output.writeMessage(1, policies_.get(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      for (int i = 0; i < policies_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, policies_.get(i));
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection other = (org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection) obj;

      if (!getPoliciesList()
          .equals(other.getPoliciesList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getPoliciesCount() > 0) {
        hash = (37 * hash) + POLICIES_FIELD_NUMBER;
        hash = (53 * hash) + getPoliciesList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.fsimage.ErasureCodingSection}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.fsimage.ErasureCodingSection)
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSectionOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_ErasureCodingSection_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_ErasureCodingSection_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection.class, org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        if (policiesBuilder_ == null) {
          policies_ = java.util.Collections.emptyList();
        } else {
          policies_ = null;
          policiesBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.internal_static_hadoop_hdfs_fsimage_ErasureCodingSection_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection build() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection buildPartial() {
        org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection result = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection result) {
        if (policiesBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0)) {
            policies_ = java.util.Collections.unmodifiableList(policies_);
            bitField0_ = (bitField0_ & ~0x00000001);
          }
          result.policies_ = policies_;
        } else {
          result.policies_ = policiesBuilder_.build();
        }
      }

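      // ErasureCodingSection has only the repeated "policies" field, which is
      // handled by buildPartialRepeatedFields above; buildPartial0 therefore has
      // no singular fields to copy, and its local from_bitField0_ is an unused
      // artifact of the code-generator template.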
      private void buildPartial0(org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection result) {
        int from_bitField0_ = bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection) {
          return mergeFrom((org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

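      // Merging another ErasureCodingSection appends its policies to this
      // builder's list (repeated fields are concatenated, not replaced). When a
      // RepeatedFieldBuilderV3 is already in use, the merge goes through it so
      // any outstanding nested builders stay consistent.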
      public Builder mergeFrom(org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection other) {
        if (other == org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection.getDefaultInstance()) return this;
        if (policiesBuilder_ == null) {
          if (!other.policies_.isEmpty()) {
            if (policies_.isEmpty()) {
              policies_ = other.policies_;
              bitField0_ = (bitField0_ & ~0x00000001);
            } else {
              ensurePoliciesIsMutable();
              policies_.addAll(other.policies_);
            }
            onChanged();
          }
        } else {
          if (!other.policies_.isEmpty()) {
            if (policiesBuilder_.isEmpty()) {
              policiesBuilder_.dispose();
              policiesBuilder_ = null;
              policies_ = other.policies_;
              bitField0_ = (bitField0_ & ~0x00000001);
              policiesBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getPoliciesFieldBuilder() : null;
            } else {
              policiesBuilder_.addAllMessages(other.policies_);
            }
          }
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        for (int i = 0; i < getPoliciesCount(); i++) {
          if (!getPolicies(i).isInitialized()) {
            return false;
          }
        }
        return true;
      }

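      // Streaming merge: reads the wire format tag by tag. Tag 10 is field
      // number 1 ("policies") with wire type 2 (length-delimited message);
      // tag 0 means end of input. Unknown fields are preserved via
      // parseUnknownField so round-tripping newer images does not drop data.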
      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.PARSER,
                        extensionRegistry);
                if (policiesBuilder_ == null) {
                  ensurePoliciesIsMutable();
                  policies_.add(m);
                } else {
                  policiesBuilder_.addMessage(m);
                }
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto> policies_ =
        java.util.Collections.emptyList();
      private void ensurePoliciesIsMutable() {
        if (!((bitField0_ & 0x00000001) != 0)) {
          policies_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto>(policies_);
          bitField0_ |= 0x00000001;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> policiesBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto> getPoliciesList() {
        if (policiesBuilder_ == null) {
          return java.util.Collections.unmodifiableList(policies_);
        } else {
          return policiesBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
       */
      public int getPoliciesCount() {
        if (policiesBuilder_ == null) {
          return policies_.size();
        } else {
          return policiesBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getPolicies(int index) {
        if (policiesBuilder_ == null) {
          return policies_.get(index);
        } else {
          return policiesBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
       */
      public Builder setPolicies(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
        if (policiesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensurePoliciesIsMutable();
          policies_.set(index, value);
          onChanged();
        } else {
          policiesBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
       */
      public Builder setPolicies(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) {
        if (policiesBuilder_ == null) {
          ensurePoliciesIsMutable();
          policies_.set(index, builderForValue.build());
          onChanged();
        } else {
          policiesBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
       */
      public Builder addPolicies(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
        if (policiesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensurePoliciesIsMutable();
          policies_.add(value);
          onChanged();
        } else {
          policiesBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
       */
      public Builder addPolicies(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
        if (policiesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensurePoliciesIsMutable();
          policies_.add(index, value);
          onChanged();
        } else {
          policiesBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
       */
      public Builder addPolicies(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) {
        if (policiesBuilder_ == null) {
          ensurePoliciesIsMutable();
          policies_.add(builderForValue.build());
          onChanged();
        } else {
          policiesBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
       */
      public Builder addPolicies(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) {
        if (policiesBuilder_ == null) {
          ensurePoliciesIsMutable();
          policies_.add(index, builderForValue.build());
          onChanged();
        } else {
          policiesBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
       */
      public Builder addAllPolicies(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto> values) {
        if (policiesBuilder_ == null) {
          ensurePoliciesIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, policies_);
          onChanged();
        } else {
          policiesBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
       */
      public Builder clearPolicies() {
        if (policiesBuilder_ == null) {
          policies_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
        } else {
          policiesBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
       */
      public Builder removePolicies(int index) {
        if (policiesBuilder_ == null) {
          ensurePoliciesIsMutable();
          policies_.remove(index);
          onChanged();
        } else {
          policiesBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder getPoliciesBuilder(
          int index) {
        return getPoliciesFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getPoliciesOrBuilder(
          int index) {
        if (policiesBuilder_ == null) {
          return policies_.get(index);
        } else {
          return policiesBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> 
           getPoliciesOrBuilderList() {
        if (policiesBuilder_ != null) {
          return policiesBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(policies_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder addPoliciesBuilder() {
        return getPoliciesFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder addPoliciesBuilder(
          int index) {
        return getPoliciesFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.ErasureCodingPolicyProto policies = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder> 
           getPoliciesBuilderList() {
        return getPoliciesFieldBuilder().getBuilderList();
      }
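      // The RepeatedFieldBuilderV3 below is created lazily: until a caller asks
      // for a nested builder view (e.g. getPoliciesBuilder), this Builder keeps a
      // plain java.util.List and only switches to the field builder on demand,
      // handing it the current list and ownership of future mutations.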
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> 
          getPoliciesFieldBuilder() {
        if (policiesBuilder_ == null) {
          policiesBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>(
                  policies_,
                  ((bitField0_ & 0x00000001) != 0),
                  getParentForChildren(),
                  isClean());
          policies_ = null;
        }
        return policiesBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.fsimage.ErasureCodingSection)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.fsimage.ErasureCodingSection)
    private static final org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection();
    }

    public static org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

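    // The (deprecated) PARSER constant delegates to Builder#mergeFrom and wraps
    // any IOException or uninitialized-message failure in an
    // InvalidProtocolBufferException carrying the partially built message;
    // parser() below is the supported accessor.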
    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ErasureCodingSection>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ErasureCodingSection>() {
      @java.lang.Override
      public ErasureCodingSection parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ErasureCodingSection> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ErasureCodingSection> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.server.namenode.FsImageProto.ErasureCodingSection getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

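  // Illustrative sketch, not generated output: one way calling code might build
  // an ErasureCodingSection and round-trip it through its serialized form. The
  // method name and the supplied policy are assumptions for illustration only;
  // the real producers and consumers live in the NameNode fsimage saver/loader.
  private static ErasureCodingSection roundTripErasureCodingSectionExample(
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto policy)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    ErasureCodingSection section = ErasureCodingSection.newBuilder()
        .addPolicies(policy)          // repeated field: appends one policy
        .build();                     // fails if a required nested field is unset
    byte[] serialized = section.toByteArray();
    return ErasureCodingSection.parseFrom(serialized);
  }
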
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_FileSummary_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_FileSummary_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_FileSummary_Section_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_FileSummary_Section_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_NameSystemSection_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_NameSystemSection_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeSection_INode_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeSection_INode_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SnapshotSection_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SnapshotSection_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_StringTableSection_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_StringTableSection_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SecretManagerSection_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_CacheManagerSection_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_CacheManagerSection_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_fsimage_ErasureCodingSection_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_fsimage_ErasureCodingSection_fieldAccessorTable;

  public static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
      getDescriptor() {
    return descriptor;
  }
  private static  org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
      descriptor;
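  // The static initializer below rebuilds fsimage.proto's FileDescriptor from the
  // compiled descriptor embedded as concatenated string literals in
  // descriptorData, resolves its dependencies (hdfs.proto, acl.proto,
  // xattr.proto), and then binds each message descriptor to its
  // FieldAccessorTable so reflection-based field access works.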
  static {
    java.lang.String[] descriptorData = {
      "\n\rfsimage.proto\022\023hadoop.hdfs.fsimage\032\nhd" +
      "fs.proto\032\tacl.proto\032\013xattr.proto\"\277\001\n\013Fil" +
      "eSummary\022\025\n\rondiskVersion\030\001 \002(\r\022\025\n\rlayou" +
      "tVersion\030\002 \002(\r\022\r\n\005codec\030\003 \001(\t\022:\n\010section" +
      "s\030\004 \003(\0132(.hadoop.hdfs.fsimage.FileSummar" +
      "y.Section\0327\n\007Section\022\014\n\004name\030\001 \001(\t\022\016\n\006le" +
      "ngth\030\002 \001(\004\022\016\n\006offset\030\003 \001(\004\"\344\001\n\021NameSyste" +
      "mSection\022\023\n\013namespaceId\030\001 \001(\r\022\022\n\ngenstam" +
      "pV1\030\002 \001(\004\022\022\n\ngenstampV2\030\003 \001(\004\022\027\n\017genstam" +
      "pV1Limit\030\004 \001(\004\022\034\n\024lastAllocatedBlockId\030\005" +
      " \001(\004\022\025\n\rtransactionId\030\006 \001(\004\022\037\n\027rollingUp" +
      "gradeStartTime\030\007 \001(\004\022#\n\033lastAllocatedStr" +
      "ipedBlockId\030\010 \001(\004\"\340\r\n\014INodeSection\022\023\n\013la" +
      "stInodeId\030\001 \001(\004\022\021\n\tnumInodes\030\002 \001(\004\032I\n\034Fi" +
      "leUnderConstructionFeature\022\022\n\nclientName" +
      "\030\001 \001(\t\022\025\n\rclientMachine\030\002 \001(\t\032&\n\017AclFeat" +
      "ureProto\022\023\n\007entries\030\002 \003(\007B\002\020\001\0320\n\021XAttrCo" +
      "mpactProto\022\014\n\004name\030\001 \002(\007\022\r\n\005value\030\002 \001(\014\032" +
      "X\n\021XAttrFeatureProto\022C\n\006xAttrs\030\001 \003(\01323.h" +
      "adoop.hdfs.fsimage.INodeSection.XAttrCom" +
      "pactProto\032\344\003\n\tINodeFile\022\023\n\013replication\030\001" +
      " \001(\r\022\030\n\020modificationTime\030\002 \001(\004\022\022\n\naccess" +
      "Time\030\003 \001(\004\022\032\n\022preferredBlockSize\030\004 \001(\004\022\022" +
      "\n\npermission\030\005 \001(\006\022\'\n\006blocks\030\006 \003(\0132\027.had" +
      "oop.hdfs.BlockProto\022N\n\006fileUC\030\007 \001(\0132>.ha" +
      "doop.hdfs.fsimage.INodeSection.FileUnder" +
      "ConstructionFeature\022>\n\003acl\030\010 \001(\01321.hadoo" +
      "p.hdfs.fsimage.INodeSection.AclFeaturePr" +
      "oto\022C\n\006xAttrs\030\t \001(\01323.hadoop.hdfs.fsimag" +
      "e.INodeSection.XAttrFeatureProto\022\027\n\017stor" +
      "agePolicyID\030\n \001(\r\022.\n\tblockType\030\013 \001(\0162\033.h" +
      "adoop.hdfs.BlockTypeProto\022\035\n\025erasureCodi" +
      "ngPolicyID\030\014 \001(\r\032a\n\034QuotaByStorageTypeEn" +
      "tryProto\0222\n\013storageType\030\001 \002(\0162\035.hadoop.h" +
      "dfs.StorageTypeProto\022\r\n\005quota\030\002 \002(\004\032p\n\036Q" +
      "uotaByStorageTypeFeatureProto\022N\n\006quotas\030" +
      "\001 \003(\0132>.hadoop.hdfs.fsimage.INodeSection" +
      ".QuotaByStorageTypeEntryProto\032\273\002\n\016INodeD" +
      "irectory\022\030\n\020modificationTime\030\001 \001(\004\022\017\n\007ns" +
      "Quota\030\002 \001(\004\022\017\n\007dsQuota\030\003 \001(\004\022\022\n\npermissi" +
      "on\030\004 \001(\006\022>\n\003acl\030\005 \001(\01321.hadoop.hdfs.fsim" +
      "age.INodeSection.AclFeatureProto\022C\n\006xAtt" +
      "rs\030\006 \001(\01323.hadoop.hdfs.fsimage.INodeSect" +
      "ion.XAttrFeatureProto\022T\n\ntypeQuotas\030\007 \001(" +
      "\0132@.hadoop.hdfs.fsimage.INodeSection.Quo" +
      "taByStorageTypeFeatureProto\032`\n\014INodeSyml" +
      "ink\022\022\n\npermission\030\001 \001(\006\022\016\n\006target\030\002 \001(\014\022" +
      "\030\n\020modificationTime\030\003 \001(\004\022\022\n\naccessTime\030" +
      "\004 \001(\004\032\314\002\n\005INode\022:\n\004type\030\001 \002(\0162,.hadoop.h" +
      "dfs.fsimage.INodeSection.INode.Type\022\n\n\002i" +
      "d\030\002 \002(\004\022\014\n\004name\030\003 \001(\014\0229\n\004file\030\004 \001(\0132+.ha" +
      "doop.hdfs.fsimage.INodeSection.INodeFile" +
      "\022C\n\tdirectory\030\005 \001(\01320.hadoop.hdfs.fsimag" +
      "e.INodeSection.INodeDirectory\022?\n\007symlink" +
      "\030\006 \001(\0132..hadoop.hdfs.fsimage.INodeSectio" +
      "n.INodeSymlink\",\n\004Type\022\010\n\004FILE\020\001\022\r\n\tDIRE" +
      "CTORY\020\002\022\013\n\007SYMLINK\020\003\"`\n\035FilesUnderConstr" +
      "uctionSection\032?\n\032FileUnderConstructionEn" +
      "try\022\017\n\007inodeId\030\001 \001(\004\022\020\n\010fullPath\030\002 \001(\t\"b" +
      "\n\025INodeDirectorySection\032I\n\010DirEntry\022\016\n\006p" +
      "arent\030\001 \001(\004\022\024\n\010children\030\002 \003(\004B\002\020\001\022\027\n\013ref" +
      "Children\030\003 \003(\rB\002\020\001\"z\n\025INodeReferenceSect" +
      "ion\032a\n\016INodeReference\022\022\n\nreferredId\030\001 \001(" +
      "\004\022\014\n\004name\030\002 \001(\014\022\025\n\rdstSnapshotId\030\003 \001(\r\022\026" +
      "\n\016lastSnapshotId\030\004 \001(\r\"\265\001\n\017SnapshotSecti" +
      "on\022\027\n\017snapshotCounter\030\001 \001(\r\022\034\n\020snapshott" +
      "ableDir\030\002 \003(\004B\002\020\001\022\024\n\014numSnapshots\030\003 \001(\r\032" +
      "U\n\010Snapshot\022\022\n\nsnapshotId\030\001 \001(\r\0225\n\004root\030" +
      "\002 \001(\0132\'.hadoop.hdfs.fsimage.INodeSection" +
      ".INode\"\200\005\n\023SnapshotDiffSection\032 \n\020Create" +
      "dListEntry\022\014\n\004name\030\001 \001(\014\032\367\001\n\rDirectoryDi" +
      "ff\022\022\n\nsnapshotId\030\001 \001(\r\022\024\n\014childrenSize\030\002" +
      " \001(\r\022\026\n\016isSnapshotRoot\030\003 \001(\010\022\014\n\004name\030\004 \001" +
      "(\014\022F\n\014snapshotCopy\030\005 \001(\01320.hadoop.hdfs.f" +
      "simage.INodeSection.INodeDirectory\022\027\n\017cr" +
      "eatedListSize\030\006 \001(\r\022\030\n\014deletedINode\030\007 \003(" +
      "\004B\002\020\001\022\033\n\017deletedINodeRef\030\010 \003(\rB\002\020\001\032\252\001\n\010F" +
      "ileDiff\022\022\n\nsnapshotId\030\001 \001(\r\022\020\n\010fileSize\030" +
      "\002 \001(\004\022\014\n\004name\030\003 \001(\014\022A\n\014snapshotCopy\030\004 \001(" +
      "\0132+.hadoop.hdfs.fsimage.INodeSection.INo" +
      "deFile\022\'\n\006blocks\030\005 \003(\0132\027.hadoop.hdfs.Blo" +
      "ckProto\032\237\001\n\tDiffEntry\022E\n\004type\030\001 \002(\01627.ha" +
      "doop.hdfs.fsimage.SnapshotDiffSection.Di" +
      "ffEntry.Type\022\017\n\007inodeId\030\002 \001(\004\022\021\n\tnumOfDi" +
      "ff\030\003 \001(\r\"\'\n\004Type\022\014\n\010FILEDIFF\020\001\022\021\n\rDIRECT" +
      "ORYDIFF\020\002\"]\n\022StringTableSection\022\020\n\010numEn" +
      "try\030\001 \001(\r\022\023\n\010maskBits\030\002 \001(\r:\0010\032 \n\005Entry\022" +
      "\n\n\002id\030\001 \001(\r\022\013\n\003str\030\002 \001(\t\"\341\002\n\024SecretManag" +
      "erSection\022\021\n\tcurrentId\030\001 \001(\r\022\033\n\023tokenSeq" +
      "uenceNumber\030\002 \001(\r\022\017\n\007numKeys\030\003 \001(\r\022\021\n\tnu" +
      "mTokens\030\004 \001(\r\032<\n\rDelegationKey\022\n\n\002id\030\001 \001" +
      "(\r\022\022\n\nexpiryDate\030\002 \001(\004\022\013\n\003key\030\003 \001(\014\032\266\001\n\014" +
      "PersistToken\022\017\n\007version\030\001 \001(\r\022\r\n\005owner\030\002" +
      " \001(\t\022\017\n\007renewer\030\003 \001(\t\022\020\n\010realUser\030\004 \001(\t\022" +
      "\021\n\tissueDate\030\005 \001(\004\022\017\n\007maxDate\030\006 \001(\004\022\026\n\016s" +
      "equenceNumber\030\007 \001(\r\022\023\n\013masterKeyId\030\010 \001(\r" +
      "\022\022\n\nexpiryDate\030\t \001(\004\"W\n\023CacheManagerSect" +
      "ion\022\027\n\017nextDirectiveId\030\001 \002(\004\022\020\n\010numPools" +
      "\030\002 \002(\r\022\025\n\rnumDirectives\030\003 \002(\r\"O\n\024Erasure" +
      "CodingSection\0227\n\010policies\030\001 \003(\0132%.hadoop" +
      ".hdfs.ErasureCodingPolicyProtoB6\n&org.ap" +
      "ache.hadoop.hdfs.server.namenodeB\014FsImag" +
      "eProto"
    };
    descriptor = org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
      .internalBuildGeneratedFileFrom(descriptorData,
        new org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor[] {
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
          org.apache.hadoop.hdfs.protocol.proto.AclProtos.getDescriptor(),
          org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.getDescriptor(),
        });
    internal_static_hadoop_hdfs_fsimage_FileSummary_descriptor =
      getDescriptor().getMessageTypes().get(0);
    internal_static_hadoop_hdfs_fsimage_FileSummary_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_FileSummary_descriptor,
        new java.lang.String[] { "OndiskVersion", "LayoutVersion", "Codec", "Sections", });
    internal_static_hadoop_hdfs_fsimage_FileSummary_Section_descriptor =
      internal_static_hadoop_hdfs_fsimage_FileSummary_descriptor.getNestedTypes().get(0);
    internal_static_hadoop_hdfs_fsimage_FileSummary_Section_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_FileSummary_Section_descriptor,
        new java.lang.String[] { "Name", "Length", "Offset", });
    internal_static_hadoop_hdfs_fsimage_NameSystemSection_descriptor =
      getDescriptor().getMessageTypes().get(1);
    internal_static_hadoop_hdfs_fsimage_NameSystemSection_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_NameSystemSection_descriptor,
        new java.lang.String[] { "NamespaceId", "GenstampV1", "GenstampV2", "GenstampV1Limit", "LastAllocatedBlockId", "TransactionId", "RollingUpgradeStartTime", "LastAllocatedStripedBlockId", });
    internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor =
      getDescriptor().getMessageTypes().get(2);
    internal_static_hadoop_hdfs_fsimage_INodeSection_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor,
        new java.lang.String[] { "LastInodeId", "NumInodes", });
    internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(0);
    internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeSection_FileUnderConstructionFeature_descriptor,
        new java.lang.String[] { "ClientName", "ClientMachine", });
    internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(1);
    internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeSection_AclFeatureProto_descriptor,
        new java.lang.String[] { "Entries", });
    internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(2);
    internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrCompactProto_descriptor,
        new java.lang.String[] { "Name", "Value", });
    internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(3);
    internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeSection_XAttrFeatureProto_descriptor,
        new java.lang.String[] { "XAttrs", });
    internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(4);
    internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeSection_INodeFile_descriptor,
        new java.lang.String[] { "Replication", "ModificationTime", "AccessTime", "PreferredBlockSize", "Permission", "Blocks", "FileUC", "Acl", "XAttrs", "StoragePolicyID", "BlockType", "ErasureCodingPolicyID", });
    internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(5);
    internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeEntryProto_descriptor,
        new java.lang.String[] { "StorageType", "Quota", });
    internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(6);
    internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeSection_QuotaByStorageTypeFeatureProto_descriptor,
        new java.lang.String[] { "Quotas", });
    internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(7);
    internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeSection_INodeDirectory_descriptor,
        new java.lang.String[] { "ModificationTime", "NsQuota", "DsQuota", "Permission", "Acl", "XAttrs", "TypeQuotas", });
    internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(8);
    internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeSection_INodeSymlink_descriptor,
        new java.lang.String[] { "Permission", "Target", "ModificationTime", "AccessTime", });
    internal_static_hadoop_hdfs_fsimage_INodeSection_INode_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeSection_descriptor.getNestedTypes().get(9);
    internal_static_hadoop_hdfs_fsimage_INodeSection_INode_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeSection_INode_descriptor,
        new java.lang.String[] { "Type", "Id", "Name", "File", "Directory", "Symlink", });
    internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_descriptor =
      getDescriptor().getMessageTypes().get(3);
    internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_descriptor =
      internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_descriptor.getNestedTypes().get(0);
    internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_FilesUnderConstructionSection_FileUnderConstructionEntry_descriptor,
        new java.lang.String[] { "InodeId", "FullPath", });
    internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_descriptor =
      getDescriptor().getMessageTypes().get(4);
    internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_descriptor.getNestedTypes().get(0);
    internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeDirectorySection_DirEntry_descriptor,
        new java.lang.String[] { "Parent", "Children", "RefChildren", });
    internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_descriptor =
      getDescriptor().getMessageTypes().get(5);
    internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_descriptor =
      internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_descriptor.getNestedTypes().get(0);
    internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_INodeReferenceSection_INodeReference_descriptor,
        new java.lang.String[] { "ReferredId", "Name", "DstSnapshotId", "LastSnapshotId", });
    internal_static_hadoop_hdfs_fsimage_SnapshotSection_descriptor =
      getDescriptor().getMessageTypes().get(6);
    internal_static_hadoop_hdfs_fsimage_SnapshotSection_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_SnapshotSection_descriptor,
        new java.lang.String[] { "SnapshotCounter", "SnapshottableDir", "NumSnapshots", });
    internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_descriptor =
      internal_static_hadoop_hdfs_fsimage_SnapshotSection_descriptor.getNestedTypes().get(0);
    internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_SnapshotSection_Snapshot_descriptor,
        new java.lang.String[] { "SnapshotId", "Root", });
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor =
      getDescriptor().getMessageTypes().get(7);
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor,
        new java.lang.String[] { });
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_descriptor =
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor.getNestedTypes().get(0);
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_CreatedListEntry_descriptor,
        new java.lang.String[] { "Name", });
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_descriptor =
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor.getNestedTypes().get(1);
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DirectoryDiff_descriptor,
        new java.lang.String[] { "SnapshotId", "ChildrenSize", "IsSnapshotRoot", "Name", "SnapshotCopy", "CreatedListSize", "DeletedINode", "DeletedINodeRef", });
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_descriptor =
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor.getNestedTypes().get(2);
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_FileDiff_descriptor,
        new java.lang.String[] { "SnapshotId", "FileSize", "Name", "SnapshotCopy", "Blocks", });
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_descriptor =
      internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_descriptor.getNestedTypes().get(3);
    internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_SnapshotDiffSection_DiffEntry_descriptor,
        new java.lang.String[] { "Type", "InodeId", "NumOfDiff", });
    internal_static_hadoop_hdfs_fsimage_StringTableSection_descriptor =
      getDescriptor().getMessageTypes().get(8);
    internal_static_hadoop_hdfs_fsimage_StringTableSection_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_StringTableSection_descriptor,
        new java.lang.String[] { "NumEntry", "MaskBits", });
    internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_descriptor =
      internal_static_hadoop_hdfs_fsimage_StringTableSection_descriptor.getNestedTypes().get(0);
    internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_StringTableSection_Entry_descriptor,
        new java.lang.String[] { "Id", "Str", });
    internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor =
      getDescriptor().getMessageTypes().get(9);
    internal_static_hadoop_hdfs_fsimage_SecretManagerSection_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor,
        new java.lang.String[] { "CurrentId", "TokenSequenceNumber", "NumKeys", "NumTokens", });
    internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_descriptor =
      internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor.getNestedTypes().get(0);
    internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_SecretManagerSection_DelegationKey_descriptor,
        new java.lang.String[] { "Id", "ExpiryDate", "Key", });
    internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_descriptor =
      internal_static_hadoop_hdfs_fsimage_SecretManagerSection_descriptor.getNestedTypes().get(1);
    internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_SecretManagerSection_PersistToken_descriptor,
        new java.lang.String[] { "Version", "Owner", "Renewer", "RealUser", "IssueDate", "MaxDate", "SequenceNumber", "MasterKeyId", "ExpiryDate", });
    internal_static_hadoop_hdfs_fsimage_CacheManagerSection_descriptor =
      getDescriptor().getMessageTypes().get(10);
    internal_static_hadoop_hdfs_fsimage_CacheManagerSection_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_CacheManagerSection_descriptor,
        new java.lang.String[] { "NextDirectiveId", "NumPools", "NumDirectives", });
    internal_static_hadoop_hdfs_fsimage_ErasureCodingSection_descriptor =
      getDescriptor().getMessageTypes().get(11);
    internal_static_hadoop_hdfs_fsimage_ErasureCodingSection_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_fsimage_ErasureCodingSection_descriptor,
        new java.lang.String[] { "Policies", });
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor();
    org.apache.hadoop.hdfs.protocol.proto.AclProtos.getDescriptor();
    org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.getDescriptor();
  }

  // @@protoc_insertion_point(outer_class_scope)
}