// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: hdfs.proto

// Protobuf Java Version: 3.25.5
package org.apache.hadoop.hdfs.protocol.proto;

public final class HdfsProtos {
  private HdfsProtos() {}
  public static void registerAllExtensions(
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite registry) {
  }

  public static void registerAllExtensions(
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry registry) {
    registerAllExtensions(
        (org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite) registry);
  }
  /**
   * <pre>
   **
   * Types of recognized storage media.
   * </pre>
   *
   * Protobuf enum {@code hadoop.hdfs.StorageTypeProto}
   */
  public enum StorageTypeProto
      implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
    /**
     * <code>DISK = 1;</code>
     */
    DISK(1),
    /**
     * <code>SSD = 2;</code>
     */
    SSD(2),
    /**
     * <code>ARCHIVE = 3;</code>
     */
    ARCHIVE(3),
    /**
     * <code>RAM_DISK = 4;</code>
     */
    RAM_DISK(4),
    /**
     * <code>PROVIDED = 5;</code>
     */
    PROVIDED(5),
    /**
     * <code>NVDIMM = 6;</code>
     */
    NVDIMM(6),
    ;

    /**
     * <code>DISK = 1;</code>
     */
    public static final int DISK_VALUE = 1;
    /**
     * <code>SSD = 2;</code>
     */
    public static final int SSD_VALUE = 2;
    /**
     * <code>ARCHIVE = 3;</code>
     */
    public static final int ARCHIVE_VALUE = 3;
    /**
     * <code>RAM_DISK = 4;</code>
     */
    public static final int RAM_DISK_VALUE = 4;
    /**
     * <code>PROVIDED = 5;</code>
     */
    public static final int PROVIDED_VALUE = 5;
    /**
     * <code>NVDIMM = 6;</code>
     */
    public static final int NVDIMM_VALUE = 6;


    public final int getNumber() {
      return value;
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     * @deprecated Use {@link #forNumber(int)} instead.
     */
    @java.lang.Deprecated
    public static StorageTypeProto valueOf(int value) {
      return forNumber(value);
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     */
    public static StorageTypeProto forNumber(int value) {
      switch (value) {
        case 1: return DISK;
        case 2: return SSD;
        case 3: return ARCHIVE;
        case 4: return RAM_DISK;
        case 5: return PROVIDED;
        case 6: return NVDIMM;
        default: return null;
      }
    }

    public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<StorageTypeProto>
        internalGetValueMap() {
      return internalValueMap;
    }
    private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
        StorageTypeProto> internalValueMap =
          new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<StorageTypeProto>() {
            public StorageTypeProto findValueByNumber(int number) {
              return StorageTypeProto.forNumber(number);
            }
          };

    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
        getValueDescriptor() {
      return getDescriptor().getValues().get(ordinal());
    }
    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(0);
    }

    private static final StorageTypeProto[] VALUES = values();

    public static StorageTypeProto valueOf(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
      if (desc.getType() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
      }
      return VALUES[desc.getIndex()];
    }

    private final int value;

    private StorageTypeProto(int value) {
      this.value = value;
    }

    // @@protoc_insertion_point(enum_scope:hadoop.hdfs.StorageTypeProto)
  }
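
  /*
   * Illustrative only (not part of the generated API surface): a minimal
   * sketch of how the wire-value accessors above are typically used.
   * forNumber(int) returns null for an unrecognized value, and the
   * deprecated valueOf(int) simply delegates to it.
   *
   *   int wireValue = StorageTypeProto.SSD.getNumber();          // 2
   *   StorageTypeProto parsed = StorageTypeProto.forNumber(wireValue);
   *   if (parsed == null) {
   *     // value came from a newer peer; skip or fall back as appropriate
   *   }
   */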

  /**
   * <pre>
   **
   * Types of recognized blocks.
   * </pre>
   *
   * Protobuf enum {@code hadoop.hdfs.BlockTypeProto}
   */
  public enum BlockTypeProto
      implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
    /**
     * <code>CONTIGUOUS = 0;</code>
     */
    CONTIGUOUS(0),
    /**
     * <code>STRIPED = 1;</code>
     */
    STRIPED(1),
    ;

    /**
     * <code>CONTIGUOUS = 0;</code>
     */
    public static final int CONTIGUOUS_VALUE = 0;
    /**
     * <code>STRIPED = 1;</code>
     */
    public static final int STRIPED_VALUE = 1;


    public final int getNumber() {
      return value;
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     * @deprecated Use {@link #forNumber(int)} instead.
     */
    @java.lang.Deprecated
    public static BlockTypeProto valueOf(int value) {
      return forNumber(value);
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     */
    public static BlockTypeProto forNumber(int value) {
      switch (value) {
        case 0: return CONTIGUOUS;
        case 1: return STRIPED;
        default: return null;
      }
    }

    public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<BlockTypeProto>
        internalGetValueMap() {
      return internalValueMap;
    }
    private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
        BlockTypeProto> internalValueMap =
          new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<BlockTypeProto>() {
            public BlockTypeProto findValueByNumber(int number) {
              return BlockTypeProto.forNumber(number);
            }
          };

    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
        getValueDescriptor() {
      return getDescriptor().getValues().get(ordinal());
    }
    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(1);
    }

    private static final BlockTypeProto[] VALUES = values();

    public static BlockTypeProto valueOf(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
      if (desc.getType() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
      }
      return VALUES[desc.getIndex()];
    }

    private final int value;

    private BlockTypeProto(int value) {
      this.value = value;
    }

    // @@protoc_insertion_point(enum_scope:hadoop.hdfs.BlockTypeProto)
  }

  /**
   * <pre>
   **
   * Cipher suite.
   * </pre>
   *
   * Protobuf enum {@code hadoop.hdfs.CipherSuiteProto}
   */
  public enum CipherSuiteProto
      implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
    /**
     * <code>UNKNOWN = 1;</code>
     */
    UNKNOWN(1),
    /**
     * <code>AES_CTR_NOPADDING = 2;</code>
     */
    AES_CTR_NOPADDING(2),
    /**
     * <code>SM4_CTR_NOPADDING = 3;</code>
     */
    SM4_CTR_NOPADDING(3),
    ;

    /**
     * <code>UNKNOWN = 1;</code>
     */
    public static final int UNKNOWN_VALUE = 1;
    /**
     * <code>AES_CTR_NOPADDING = 2;</code>
     */
    public static final int AES_CTR_NOPADDING_VALUE = 2;
    /**
     * <code>SM4_CTR_NOPADDING = 3;</code>
     */
    public static final int SM4_CTR_NOPADDING_VALUE = 3;


    public final int getNumber() {
      return value;
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     * @deprecated Use {@link #forNumber(int)} instead.
     */
    @java.lang.Deprecated
    public static CipherSuiteProto valueOf(int value) {
      return forNumber(value);
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     */
    public static CipherSuiteProto forNumber(int value) {
      switch (value) {
        case 1: return UNKNOWN;
        case 2: return AES_CTR_NOPADDING;
        case 3: return SM4_CTR_NOPADDING;
        default: return null;
      }
    }

    public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<CipherSuiteProto>
        internalGetValueMap() {
      return internalValueMap;
    }
    private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
        CipherSuiteProto> internalValueMap =
          new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<CipherSuiteProto>() {
            public CipherSuiteProto findValueByNumber(int number) {
              return CipherSuiteProto.forNumber(number);
            }
          };

    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
        getValueDescriptor() {
      return getDescriptor().getValues().get(ordinal());
    }
    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(2);
    }

    private static final CipherSuiteProto[] VALUES = values();

    public static CipherSuiteProto valueOf(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
      if (desc.getType() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
      }
      return VALUES[desc.getIndex()];
    }

    private final int value;

    private CipherSuiteProto(int value) {
      this.value = value;
    }

    // @@protoc_insertion_point(enum_scope:hadoop.hdfs.CipherSuiteProto)
  }

  /**
   * <pre>
   **
   * Crypto protocol version used to access encrypted files.
   * </pre>
   *
   * Protobuf enum {@code hadoop.hdfs.CryptoProtocolVersionProto}
   */
  public enum CryptoProtocolVersionProto
      implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
    /**
     * <code>UNKNOWN_PROTOCOL_VERSION = 1;</code>
     */
    UNKNOWN_PROTOCOL_VERSION(1),
    /**
     * <code>ENCRYPTION_ZONES = 2;</code>
     */
    ENCRYPTION_ZONES(2),
    ;

    /**
     * <code>UNKNOWN_PROTOCOL_VERSION = 1;</code>
     */
    public static final int UNKNOWN_PROTOCOL_VERSION_VALUE = 1;
    /**
     * <code>ENCRYPTION_ZONES = 2;</code>
     */
    public static final int ENCRYPTION_ZONES_VALUE = 2;


    public final int getNumber() {
      return value;
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     * @deprecated Use {@link #forNumber(int)} instead.
     */
    @java.lang.Deprecated
    public static CryptoProtocolVersionProto valueOf(int value) {
      return forNumber(value);
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     */
    public static CryptoProtocolVersionProto forNumber(int value) {
      switch (value) {
        case 1: return UNKNOWN_PROTOCOL_VERSION;
        case 2: return ENCRYPTION_ZONES;
        default: return null;
      }
    }

    public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<CryptoProtocolVersionProto>
        internalGetValueMap() {
      return internalValueMap;
    }
    private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
        CryptoProtocolVersionProto> internalValueMap =
          new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<CryptoProtocolVersionProto>() {
            public CryptoProtocolVersionProto findValueByNumber(int number) {
              return CryptoProtocolVersionProto.forNumber(number);
            }
          };

    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
        getValueDescriptor() {
      return getDescriptor().getValues().get(ordinal());
    }
    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(3);
    }

    private static final CryptoProtocolVersionProto[] VALUES = values();

    public static CryptoProtocolVersionProto valueOf(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
      if (desc.getType() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
      }
      return VALUES[desc.getIndex()];
    }

    private final int value;

    private CryptoProtocolVersionProto(int value) {
      this.value = value;
    }

    // @@protoc_insertion_point(enum_scope:hadoop.hdfs.CryptoProtocolVersionProto)
  }

  /**
   * <pre>
   **
   * EC policy state.
   * </pre>
   *
   * Protobuf enum {@code hadoop.hdfs.ErasureCodingPolicyState}
   */
  public enum ErasureCodingPolicyState
      implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
    /**
     * <code>DISABLED = 1;</code>
     */
    DISABLED(1),
    /**
     * <code>ENABLED = 2;</code>
     */
    ENABLED(2),
    /**
     * <code>REMOVED = 3;</code>
     */
    REMOVED(3),
    ;

    /**
     * <code>DISABLED = 1;</code>
     */
    public static final int DISABLED_VALUE = 1;
    /**
     * <code>ENABLED = 2;</code>
     */
    public static final int ENABLED_VALUE = 2;
    /**
     * <code>REMOVED = 3;</code>
     */
    public static final int REMOVED_VALUE = 3;


    public final int getNumber() {
      return value;
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     * @deprecated Use {@link #forNumber(int)} instead.
     */
    @java.lang.Deprecated
    public static ErasureCodingPolicyState valueOf(int value) {
      return forNumber(value);
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     */
    public static ErasureCodingPolicyState forNumber(int value) {
      switch (value) {
        case 1: return DISABLED;
        case 2: return ENABLED;
        case 3: return REMOVED;
        default: return null;
      }
    }

    public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<ErasureCodingPolicyState>
        internalGetValueMap() {
      return internalValueMap;
    }
    private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
        ErasureCodingPolicyState> internalValueMap =
          new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<ErasureCodingPolicyState>() {
            public ErasureCodingPolicyState findValueByNumber(int number) {
              return ErasureCodingPolicyState.forNumber(number);
            }
          };

    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
        getValueDescriptor() {
      return getDescriptor().getValues().get(ordinal());
    }
    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(4);
    }

    private static final ErasureCodingPolicyState[] VALUES = values();

    public static ErasureCodingPolicyState valueOf(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
      if (desc.getType() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
      }
      return VALUES[desc.getIndex()];
    }

    private final int value;

    private ErasureCodingPolicyState(int value) {
      this.value = value;
    }

    // @@protoc_insertion_point(enum_scope:hadoop.hdfs.ErasureCodingPolicyState)
  }

  /**
   * <pre>
   **
   * Checksum algorithms/types used in HDFS
   * Make sure this enum's integer values match enum values' id properties defined
   * in org.apache.hadoop.util.DataChecksum.Type
   * </pre>
   *
   * Protobuf enum {@code hadoop.hdfs.ChecksumTypeProto}
   */
  public enum ChecksumTypeProto
      implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
    /**
     * <code>CHECKSUM_NULL = 0;</code>
     */
    CHECKSUM_NULL(0),
    /**
     * <code>CHECKSUM_CRC32 = 1;</code>
     */
    CHECKSUM_CRC32(1),
    /**
     * <code>CHECKSUM_CRC32C = 2;</code>
     */
    CHECKSUM_CRC32C(2),
    ;

    /**
     * <code>CHECKSUM_NULL = 0;</code>
     */
    public static final int CHECKSUM_NULL_VALUE = 0;
    /**
     * <code>CHECKSUM_CRC32 = 1;</code>
     */
    public static final int CHECKSUM_CRC32_VALUE = 1;
    /**
     * <code>CHECKSUM_CRC32C = 2;</code>
     */
    public static final int CHECKSUM_CRC32C_VALUE = 2;


    public final int getNumber() {
      return value;
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     * @deprecated Use {@link #forNumber(int)} instead.
     */
    @java.lang.Deprecated
    public static ChecksumTypeProto valueOf(int value) {
      return forNumber(value);
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     */
    public static ChecksumTypeProto forNumber(int value) {
      switch (value) {
        case 0: return CHECKSUM_NULL;
        case 1: return CHECKSUM_CRC32;
        case 2: return CHECKSUM_CRC32C;
        default: return null;
      }
    }

    public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<ChecksumTypeProto>
        internalGetValueMap() {
      return internalValueMap;
    }
    private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
        ChecksumTypeProto> internalValueMap =
          new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<ChecksumTypeProto>() {
            public ChecksumTypeProto findValueByNumber(int number) {
              return ChecksumTypeProto.forNumber(number);
            }
          };

    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
        getValueDescriptor() {
      return getDescriptor().getValues().get(ordinal());
    }
    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(5);
    }

    private static final ChecksumTypeProto[] VALUES = values();

    public static ChecksumTypeProto valueOf(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
      if (desc.getType() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
      }
      return VALUES[desc.getIndex()];
    }

    private final int value;

    private ChecksumTypeProto(int value) {
      this.value = value;
    }

    // @@protoc_insertion_point(enum_scope:hadoop.hdfs.ChecksumTypeProto)
  }
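
  /*
   * Illustrative only: the comment on this enum states that its numeric
   * values match the id properties of org.apache.hadoop.util.DataChecksum.Type,
   * so a hypothetical conversion helper could map between the two by wire
   * value. The "id" field name is assumed from that comment, not verified here.
   *
   *   static ChecksumTypeProto toProto(org.apache.hadoop.util.DataChecksum.Type type) {
   *     return ChecksumTypeProto.forNumber(type.id);  // assumes the ids stay in sync
   *   }
   */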

  /**
   * Protobuf enum {@code hadoop.hdfs.BlockChecksumTypeProto}
   */
  public enum BlockChecksumTypeProto
      implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
    /**
     * <pre>
     * BlockChecksum obtained by taking the MD5 digest of chunk CRCs
     * </pre>
     *
     * <code>MD5CRC = 1;</code>
     */
    MD5CRC(1),
    /**
     * <pre>
     * Chunk-independent CRC, optionally striped
     * </pre>
     *
     * <code>COMPOSITE_CRC = 2;</code>
     */
    COMPOSITE_CRC(2),
    ;

    /**
     * <pre>
     * BlockChecksum obtained by taking the MD5 digest of chunk CRCs
     * </pre>
     *
     * <code>MD5CRC = 1;</code>
     */
    public static final int MD5CRC_VALUE = 1;
    /**
     * <pre>
     * Chunk-independent CRC, optionally striped
     * </pre>
     *
     * <code>COMPOSITE_CRC = 2;</code>
     */
    public static final int COMPOSITE_CRC_VALUE = 2;


    public final int getNumber() {
      return value;
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     * @deprecated Use {@link #forNumber(int)} instead.
     */
    @java.lang.Deprecated
    public static BlockChecksumTypeProto valueOf(int value) {
      return forNumber(value);
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     */
    public static BlockChecksumTypeProto forNumber(int value) {
      switch (value) {
        case 1: return MD5CRC;
        case 2: return COMPOSITE_CRC;
        default: return null;
      }
    }

    public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<BlockChecksumTypeProto>
        internalGetValueMap() {
      return internalValueMap;
    }
    private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
        BlockChecksumTypeProto> internalValueMap =
          new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<BlockChecksumTypeProto>() {
            public BlockChecksumTypeProto findValueByNumber(int number) {
              return BlockChecksumTypeProto.forNumber(number);
            }
          };

    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
        getValueDescriptor() {
      return getDescriptor().getValues().get(ordinal());
    }
    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(6);
    }

    private static final BlockChecksumTypeProto[] VALUES = values();

    public static BlockChecksumTypeProto valueOf(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
      if (desc.getType() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
      }
      return VALUES[desc.getIndex()];
    }

    private final int value;

    private BlockChecksumTypeProto(int value) {
      this.value = value;
    }

    // @@protoc_insertion_point(enum_scope:hadoop.hdfs.BlockChecksumTypeProto)
  }

  /**
   * <pre>
   **
   * File access permissions mode.
   * </pre>
   *
   * Protobuf enum {@code hadoop.hdfs.AccessModeProto}
   */
  public enum AccessModeProto
      implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
    /**
     * <code>READ = 1;</code>
     */
    READ(1),
    /**
     * <code>WRITE = 2;</code>
     */
    WRITE(2),
    /**
     * <code>COPY = 3;</code>
     */
    COPY(3),
    /**
     * <code>REPLACE = 4;</code>
     */
    REPLACE(4),
    ;

    /**
     * <code>READ = 1;</code>
     */
    public static final int READ_VALUE = 1;
    /**
     * <code>WRITE = 2;</code>
     */
    public static final int WRITE_VALUE = 2;
    /**
     * <code>COPY = 3;</code>
     */
    public static final int COPY_VALUE = 3;
    /**
     * <code>REPLACE = 4;</code>
     */
    public static final int REPLACE_VALUE = 4;


    public final int getNumber() {
      return value;
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     * @deprecated Use {@link #forNumber(int)} instead.
     */
    @java.lang.Deprecated
    public static AccessModeProto valueOf(int value) {
      return forNumber(value);
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     */
    public static AccessModeProto forNumber(int value) {
      switch (value) {
        case 1: return READ;
        case 2: return WRITE;
        case 3: return COPY;
        case 4: return REPLACE;
        default: return null;
      }
    }

    public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<AccessModeProto>
        internalGetValueMap() {
      return internalValueMap;
    }
    private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
        AccessModeProto> internalValueMap =
          new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<AccessModeProto>() {
            public AccessModeProto findValueByNumber(int number) {
              return AccessModeProto.forNumber(number);
            }
          };

    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
        getValueDescriptor() {
      return getDescriptor().getValues().get(ordinal());
    }
    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor().getEnumTypes().get(7);
    }

    private static final AccessModeProto[] VALUES = values();

    public static AccessModeProto valueOf(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
      if (desc.getType() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
      }
      return VALUES[desc.getIndex()];
    }

    private final int value;

    private AccessModeProto(int value) {
      this.value = value;
    }

    // @@protoc_insertion_point(enum_scope:hadoop.hdfs.AccessModeProto)
  }

  public interface ExtendedBlockProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ExtendedBlockProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * Block pool id - globally unique across clusters
     * </pre>
     *
     * <code>required string poolId = 1;</code>
     * @return Whether the poolId field is set.
     */
    boolean hasPoolId();
    /**
     * <pre>
     * Block pool id - globally unique across clusters
     * </pre>
     *
     * <code>required string poolId = 1;</code>
     * @return The poolId.
     */
    java.lang.String getPoolId();
    /**
     * <pre>
     * Block pool id - globally unique across clusters
     * </pre>
     *
     * <code>required string poolId = 1;</code>
     * @return The bytes for poolId.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getPoolIdBytes();

    /**
     * <pre>
     * the local id within a pool
     * </pre>
     *
     * <code>required uint64 blockId = 2;</code>
     * @return Whether the blockId field is set.
     */
    boolean hasBlockId();
    /**
     * <pre>
     * the local id within a pool
     * </pre>
     *
     * <code>required uint64 blockId = 2;</code>
     * @return The blockId.
     */
    long getBlockId();

    /**
     * <code>required uint64 generationStamp = 3;</code>
     * @return Whether the generationStamp field is set.
     */
    boolean hasGenerationStamp();
    /**
     * <code>required uint64 generationStamp = 3;</code>
     * @return The generationStamp.
     */
    long getGenerationStamp();

    /**
     * <pre>
     * len does not belong in ebid 
     * </pre>
     *
     * <code>optional uint64 numBytes = 4 [default = 0];</code>
     * @return Whether the numBytes field is set.
     */
    boolean hasNumBytes();
    /**
     * <pre>
     * len does not belong in ebid 
     * </pre>
     *
     * <code>optional uint64 numBytes = 4 [default = 0];</code>
     * @return The numBytes.
     */
    long getNumBytes();
  }
  /**
   * <pre>
   **
   * Extended block identifies a block
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.ExtendedBlockProto}
   */
  public static final class ExtendedBlockProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.ExtendedBlockProto)
      ExtendedBlockProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use ExtendedBlockProto.newBuilder() to construct.
    private ExtendedBlockProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ExtendedBlockProto() {
      poolId_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ExtendedBlockProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder.class);
    }

    private int bitField0_;
    public static final int POOLID_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object poolId_ = "";
    /**
     * <pre>
     * Block pool id - globally unique across clusters
     * </pre>
     *
     * <code>required string poolId = 1;</code>
     * @return Whether the poolId field is set.
     */
    @java.lang.Override
    public boolean hasPoolId() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <pre>
     * Block pool id - globally unique across clusters
     * </pre>
     *
     * <code>required string poolId = 1;</code>
     * @return The poolId.
     */
    @java.lang.Override
    public java.lang.String getPoolId() {
      java.lang.Object ref = poolId_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          poolId_ = s;
        }
        return s;
      }
    }
    /**
     * <pre>
     * Block pool id - globally unique across clusters
     * </pre>
     *
     * <code>required string poolId = 1;</code>
     * @return The bytes for poolId.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getPoolIdBytes() {
      java.lang.Object ref = poolId_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        poolId_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int BLOCKID_FIELD_NUMBER = 2;
    private long blockId_ = 0L;
    /**
     * <pre>
     * the local id within a pool
     * </pre>
     *
     * <code>required uint64 blockId = 2;</code>
     * @return Whether the blockId field is set.
     */
    @java.lang.Override
    public boolean hasBlockId() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <pre>
     * the local id within a pool
     * </pre>
     *
     * <code>required uint64 blockId = 2;</code>
     * @return The blockId.
     */
    @java.lang.Override
    public long getBlockId() {
      return blockId_;
    }

    public static final int GENERATIONSTAMP_FIELD_NUMBER = 3;
    private long generationStamp_ = 0L;
    /**
     * <code>required uint64 generationStamp = 3;</code>
     * @return Whether the generationStamp field is set.
     */
    @java.lang.Override
    public boolean hasGenerationStamp() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required uint64 generationStamp = 3;</code>
     * @return The generationStamp.
     */
    @java.lang.Override
    public long getGenerationStamp() {
      return generationStamp_;
    }

    public static final int NUMBYTES_FIELD_NUMBER = 4;
    private long numBytes_ = 0L;
    /**
     * <pre>
     * len does not belong in ebid 
     * </pre>
     *
     * <code>optional uint64 numBytes = 4 [default = 0];</code>
     * @return Whether the numBytes field is set.
     */
    @java.lang.Override
    public boolean hasNumBytes() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <pre>
     * len does not belong in ebid 
     * </pre>
     *
     * <code>optional uint64 numBytes = 4 [default = 0];</code>
     * @return The numBytes.
     */
    @java.lang.Override
    public long getNumBytes() {
      return numBytes_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasPoolId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasBlockId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasGenerationStamp()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, poolId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(2, blockId_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeUInt64(3, generationStamp_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeUInt64(4, numBytes_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, poolId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(2, blockId_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(3, generationStamp_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(4, numBytes_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto) obj;

      if (hasPoolId() != other.hasPoolId()) return false;
      if (hasPoolId()) {
        if (!getPoolId()
            .equals(other.getPoolId())) return false;
      }
      if (hasBlockId() != other.hasBlockId()) return false;
      if (hasBlockId()) {
        if (getBlockId()
            != other.getBlockId()) return false;
      }
      if (hasGenerationStamp() != other.hasGenerationStamp()) return false;
      if (hasGenerationStamp()) {
        if (getGenerationStamp()
            != other.getGenerationStamp()) return false;
      }
      if (hasNumBytes() != other.hasNumBytes()) return false;
      if (hasNumBytes()) {
        if (getNumBytes()
            != other.getNumBytes()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasPoolId()) {
        hash = (37 * hash) + POOLID_FIELD_NUMBER;
        hash = (53 * hash) + getPoolId().hashCode();
      }
      if (hasBlockId()) {
        hash = (37 * hash) + BLOCKID_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getBlockId());
      }
      if (hasGenerationStamp()) {
        hash = (37 * hash) + GENERATIONSTAMP_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getGenerationStamp());
      }
      if (hasNumBytes()) {
        hash = (37 * hash) + NUMBYTES_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getNumBytes());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
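
    /*
     * Illustrative only: the overloads above fall into two families.
     * parseFrom(...) expects exactly one serialized message, whereas
     * parseDelimitedFrom(...) first reads a varint length prefix, which is
     * what writeDelimitedTo(...) produces, so several messages can share one
     * stream. A minimal sketch (stream setup and error handling elided):
     *
     *   block.writeDelimitedTo(out);                        // length-prefixed
     *   ExtendedBlockProto next =
     *       ExtendedBlockProto.parseDelimitedFrom(in);      // typically null at end of stream
     */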

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Extended block identifies a block
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.ExtendedBlockProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ExtendedBlockProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        poolId_ = "";
        blockId_ = 0L;
        generationStamp_ = 0L;
        numBytes_ = 0L;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.poolId_ = poolId_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.blockId_ = blockId_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.generationStamp_ = generationStamp_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.numBytes_ = numBytes_;
          to_bitField0_ |= 0x00000008;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) return this;
        if (other.hasPoolId()) {
          poolId_ = other.poolId_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasBlockId()) {
          setBlockId(other.getBlockId());
        }
        if (other.hasGenerationStamp()) {
          setGenerationStamp(other.getGenerationStamp());
        }
        if (other.hasNumBytes()) {
          setNumBytes(other.getNumBytes());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasPoolId()) {
          return false;
        }
        if (!hasBlockId()) {
          return false;
        }
        if (!hasGenerationStamp()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                poolId_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                blockId_ = input.readUInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                generationStamp_ = input.readUInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 32: {
                numBytes_ = input.readUInt64();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object poolId_ = "";
      /**
       * <pre>
       * Block pool id - globally unique across clusters
       * </pre>
       *
       * <code>required string poolId = 1;</code>
       * @return Whether the poolId field is set.
       */
      public boolean hasPoolId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <pre>
       * Block pool id - globally unique across clusters
       * </pre>
       *
       * <code>required string poolId = 1;</code>
       * @return The poolId.
       */
      public java.lang.String getPoolId() {
        java.lang.Object ref = poolId_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            poolId_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <pre>
       * Block pool id - globally unique across clusters
       * </pre>
       *
       * <code>required string poolId = 1;</code>
       * @return The bytes for poolId.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getPoolIdBytes() {
        java.lang.Object ref = poolId_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          poolId_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <pre>
       * Block pool id - globally unique across clusters
       * </pre>
       *
       * <code>required string poolId = 1;</code>
       * @param value The poolId to set.
       * @return This builder for chaining.
       */
      public Builder setPoolId(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        poolId_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Block pool id - globally unique across clusters
       * </pre>
       *
       * <code>required string poolId = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearPoolId() {
        poolId_ = getDefaultInstance().getPoolId();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Block pool id - globally unique across clusters
       * </pre>
       *
       * <code>required string poolId = 1;</code>
       * @param value The bytes for poolId to set.
       * @return This builder for chaining.
       */
      public Builder setPoolIdBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        poolId_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private long blockId_ ;
      /**
       * <pre>
       * the local id within a pool
       * </pre>
       *
       * <code>required uint64 blockId = 2;</code>
       * @return Whether the blockId field is set.
       */
      @java.lang.Override
      public boolean hasBlockId() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       * the local id within a pool
       * </pre>
       *
       * <code>required uint64 blockId = 2;</code>
       * @return The blockId.
       */
      @java.lang.Override
      public long getBlockId() {
        return blockId_;
      }
      /**
       * <pre>
       * the local id within a pool
       * </pre>
       *
       * <code>required uint64 blockId = 2;</code>
       * @param value The blockId to set.
       * @return This builder for chaining.
       */
      public Builder setBlockId(long value) {

        blockId_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * the local id within a pool
       * </pre>
       *
       * <code>required uint64 blockId = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearBlockId() {
        bitField0_ = (bitField0_ & ~0x00000002);
        blockId_ = 0L;
        onChanged();
        return this;
      }

      private long generationStamp_ ;
      /**
       * <code>required uint64 generationStamp = 3;</code>
       * @return Whether the generationStamp field is set.
       */
      @java.lang.Override
      public boolean hasGenerationStamp() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required uint64 generationStamp = 3;</code>
       * @return The generationStamp.
       */
      @java.lang.Override
      public long getGenerationStamp() {
        return generationStamp_;
      }
      /**
       * <code>required uint64 generationStamp = 3;</code>
       * @param value The generationStamp to set.
       * @return This builder for chaining.
       */
      public Builder setGenerationStamp(long value) {

        generationStamp_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 generationStamp = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearGenerationStamp() {
        bitField0_ = (bitField0_ & ~0x00000004);
        generationStamp_ = 0L;
        onChanged();
        return this;
      }

      private long numBytes_ ;
      /**
       * <pre>
       * len does not belong in ebid 
       * </pre>
       *
       * <code>optional uint64 numBytes = 4 [default = 0];</code>
       * @return Whether the numBytes field is set.
       */
      @java.lang.Override
      public boolean hasNumBytes() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <pre>
       * len does not belong in ebid 
       * </pre>
       *
       * <code>optional uint64 numBytes = 4 [default = 0];</code>
       * @return The numBytes.
       */
      @java.lang.Override
      public long getNumBytes() {
        return numBytes_;
      }
      /**
       * <pre>
       * len does not belong in ebid 
       * </pre>
       *
       * <code>optional uint64 numBytes = 4 [default = 0];</code>
       * @param value The numBytes to set.
       * @return This builder for chaining.
       */
      public Builder setNumBytes(long value) {

        numBytes_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * len does not belong in ebid 
       * </pre>
       *
       * <code>optional uint64 numBytes = 4 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearNumBytes() {
        bitField0_ = (bitField0_ & ~0x00000008);
        numBytes_ = 0L;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ExtendedBlockProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ExtendedBlockProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ExtendedBlockProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ExtendedBlockProto>() {
      @java.lang.Override
      public ExtendedBlockProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ExtendedBlockProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ExtendedBlockProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
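
  /*
   * Minimal usage sketch for the generated ExtendedBlockProto API. The values
   * below ("BP-1234" and the numeric literals) are illustrative placeholders,
   * not protocol constants. poolId, blockId and generationStamp are required
   * fields, so build() fails if any of them is left unset; numBytes is
   * optional with a default of 0. The round-trip uses the standard
   * toByteArray()/parseFrom(byte[]) helpers of the generated message.
   *
   *   ExtendedBlockProto block = ExtendedBlockProto.newBuilder()
   *       .setPoolId("BP-1234")          // required string poolId = 1
   *       .setBlockId(1073741825L)       // required uint64 blockId = 2
   *       .setGenerationStamp(1001L)     // required uint64 generationStamp = 3
   *       .setNumBytes(134217728L)       // optional uint64 numBytes = 4
   *       .build();
   *
   *   byte[] wire = block.toByteArray();
   *   ExtendedBlockProto copy = ExtendedBlockProto.parseFrom(wire);
   */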

  public interface ProvidedStorageLocationProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ProvidedStorageLocationProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required string path = 1;</code>
     * @return Whether the path field is set.
     */
    boolean hasPath();
    /**
     * <code>required string path = 1;</code>
     * @return The path.
     */
    java.lang.String getPath();
    /**
     * <code>required string path = 1;</code>
     * @return The bytes for path.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getPathBytes();

    /**
     * <code>required int64 offset = 2;</code>
     * @return Whether the offset field is set.
     */
    boolean hasOffset();
    /**
     * <code>required int64 offset = 2;</code>
     * @return The offset.
     */
    long getOffset();

    /**
     * <code>required int64 length = 3;</code>
     * @return Whether the length field is set.
     */
    boolean hasLength();
    /**
     * <code>required int64 length = 3;</code>
     * @return The length.
     */
    long getLength();

    /**
     * <code>required bytes nonce = 4;</code>
     * @return Whether the nonce field is set.
     */
    boolean hasNonce();
    /**
     * <code>required bytes nonce = 4;</code>
     * @return The nonce.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getNonce();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.ProvidedStorageLocationProto}
   */
  public static final class ProvidedStorageLocationProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.ProvidedStorageLocationProto)
      ProvidedStorageLocationProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use ProvidedStorageLocationProto.newBuilder() to construct.
    private ProvidedStorageLocationProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ProvidedStorageLocationProto() {
      path_ = "";
      nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ProvidedStorageLocationProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ProvidedStorageLocationProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ProvidedStorageLocationProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.Builder.class);
    }

    private int bitField0_;
    public static final int PATH_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object path_ = "";
    /**
     * <code>required string path = 1;</code>
     * @return Whether the path field is set.
     */
    @java.lang.Override
    public boolean hasPath() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required string path = 1;</code>
     * @return The path.
     */
    @java.lang.Override
    public java.lang.String getPath() {
      java.lang.Object ref = path_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          path_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string path = 1;</code>
     * @return The bytes for path.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getPathBytes() {
      java.lang.Object ref = path_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        path_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }
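
    /*
     * Note on the two accessors above: path_ holds either a java.lang.String
     * or a ByteString. getPath() lazily decodes a ByteString as UTF-8 and, if
     * the bytes are valid UTF-8, caches the decoded String back into path_;
     * getPathBytes() does the reverse and caches the encoded ByteString. The
     * same lazy-conversion pattern is used for every string field in this file.
     */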

    public static final int OFFSET_FIELD_NUMBER = 2;
    private long offset_ = 0L;
    /**
     * <code>required int64 offset = 2;</code>
     * @return Whether the offset field is set.
     */
    @java.lang.Override
    public boolean hasOffset() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required int64 offset = 2;</code>
     * @return The offset.
     */
    @java.lang.Override
    public long getOffset() {
      return offset_;
    }

    public static final int LENGTH_FIELD_NUMBER = 3;
    private long length_ = 0L;
    /**
     * <code>required int64 length = 3;</code>
     * @return Whether the length field is set.
     */
    @java.lang.Override
    public boolean hasLength() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required int64 length = 3;</code>
     * @return The length.
     */
    @java.lang.Override
    public long getLength() {
      return length_;
    }

    public static final int NONCE_FIELD_NUMBER = 4;
    private org.apache.hadoop.thirdparty.protobuf.ByteString nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>required bytes nonce = 4;</code>
     * @return Whether the nonce field is set.
     */
    @java.lang.Override
    public boolean hasNonce() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>required bytes nonce = 4;</code>
     * @return The nonce.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getNonce() {
      return nonce_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasPath()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasOffset()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasLength()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasNonce()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, path_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeInt64(2, offset_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeInt64(3, length_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeBytes(4, nonce_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, path_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt64Size(2, offset_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt64Size(3, length_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(4, nonce_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }
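
    /*
     * Worked example for getSerializedSize(), with illustrative values: for a
     * message with path "a", offset 0, length 5 and a 2-byte nonce, the size is
     *   computeStringSize(1, "a")   = 1 (tag) + 1 (length) + 1 (bytes) = 3
     *   computeInt64Size(2, 0)      = 1 (tag) + 1 (varint)             = 2
     *   computeInt64Size(3, 5)      = 1 (tag) + 1 (varint)             = 2
     *   computeBytesSize(4, nonce)  = 1 (tag) + 1 (length) + 2 (bytes) = 4
     * for a total of 11 bytes, plus the size of any unknown fields.
     */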

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto) obj;

      if (hasPath() != other.hasPath()) return false;
      if (hasPath()) {
        if (!getPath()
            .equals(other.getPath())) return false;
      }
      if (hasOffset() != other.hasOffset()) return false;
      if (hasOffset()) {
        if (getOffset()
            != other.getOffset()) return false;
      }
      if (hasLength() != other.hasLength()) return false;
      if (hasLength()) {
        if (getLength()
            != other.getLength()) return false;
      }
      if (hasNonce() != other.hasNonce()) return false;
      if (hasNonce()) {
        if (!getNonce()
            .equals(other.getNonce())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasPath()) {
        hash = (37 * hash) + PATH_FIELD_NUMBER;
        hash = (53 * hash) + getPath().hashCode();
      }
      if (hasOffset()) {
        hash = (37 * hash) + OFFSET_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getOffset());
      }
      if (hasLength()) {
        hash = (37 * hash) + LENGTH_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getLength());
      }
      if (hasNonce()) {
        hash = (37 * hash) + NONCE_FIELD_NUMBER;
        hash = (53 * hash) + getNonce().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.ProvidedStorageLocationProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ProvidedStorageLocationProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ProvidedStorageLocationProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ProvidedStorageLocationProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        path_ = "";
        offset_ = 0L;
        length_ = 0L;
        nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ProvidedStorageLocationProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }
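
      /*
       * Note: build() validates the four required fields (path, offset,
       * length, nonce) via isInitialized() and throws an
       * UninitializedMessageException if any of them is missing, whereas
       * buildPartial() returns the message as-is. The PARSER defined later in
       * this class relies on buildPartial() to preserve partial data when
       * parsing fails midway.
       */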

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.path_ = path_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.offset_ = offset_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.length_ = length_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.nonce_ = nonce_;
          to_bitField0_ |= 0x00000008;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto.getDefaultInstance()) return this;
        if (other.hasPath()) {
          path_ = other.path_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasOffset()) {
          setOffset(other.getOffset());
        }
        if (other.hasLength()) {
          setLength(other.getLength());
        }
        if (other.hasNonce()) {
          setNonce(other.getNonce());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasPath()) {
          return false;
        }
        if (!hasOffset()) {
          return false;
        }
        if (!hasLength()) {
          return false;
        }
        if (!hasNonce()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                path_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                offset_ = input.readInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                length_ = input.readInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 34: {
                nonce_ = input.readBytes();
                bitField0_ |= 0x00000008;
                break;
              } // case 34
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
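
      /*
       * The case labels in the switch above are protobuf wire tags, computed
       * as (fieldNumber << 3) | wireType:
       *   10 = (1 << 3) | 2  -> path,   length-delimited
       *   16 = (2 << 3) | 0  -> offset, varint
       *   24 = (3 << 3) | 0  -> length, varint
       *   34 = (4 << 3) | 2  -> nonce,  length-delimited
       * Unrecognized tags fall through to parseUnknownField(), and a zero tag
       * (or an end-group tag) terminates the read loop.
       */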
      private int bitField0_;

      private java.lang.Object path_ = "";
      /**
       * <code>required string path = 1;</code>
       * @return Whether the path field is set.
       */
      public boolean hasPath() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required string path = 1;</code>
       * @return The path.
       */
      public java.lang.String getPath() {
        java.lang.Object ref = path_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            path_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string path = 1;</code>
       * @return The bytes for path.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getPathBytes() {
        java.lang.Object ref = path_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          path_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string path = 1;</code>
       * @param value The path to set.
       * @return This builder for chaining.
       */
      public Builder setPath(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        path_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required string path = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearPath() {
        path_ = getDefaultInstance().getPath();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>required string path = 1;</code>
       * @param value The bytes for path to set.
       * @return This builder for chaining.
       */
      public Builder setPathBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        path_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private long offset_ ;
      /**
       * <code>required int64 offset = 2;</code>
       * @return Whether the offset field is set.
       */
      @java.lang.Override
      public boolean hasOffset() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required int64 offset = 2;</code>
       * @return The offset.
       */
      @java.lang.Override
      public long getOffset() {
        return offset_;
      }
      /**
       * <code>required int64 offset = 2;</code>
       * @param value The offset to set.
       * @return This builder for chaining.
       */
      public Builder setOffset(long value) {

        offset_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required int64 offset = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearOffset() {
        bitField0_ = (bitField0_ & ~0x00000002);
        offset_ = 0L;
        onChanged();
        return this;
      }

      private long length_ ;
      /**
       * <code>required int64 length = 3;</code>
       * @return Whether the length field is set.
       */
      @java.lang.Override
      public boolean hasLength() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required int64 length = 3;</code>
       * @return The length.
       */
      @java.lang.Override
      public long getLength() {
        return length_;
      }
      /**
       * <code>required int64 length = 3;</code>
       * @param value The length to set.
       * @return This builder for chaining.
       */
      public Builder setLength(long value) {

        length_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required int64 length = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearLength() {
        bitField0_ = (bitField0_ & ~0x00000004);
        length_ = 0L;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>required bytes nonce = 4;</code>
       * @return Whether the nonce field is set.
       */
      @java.lang.Override
      public boolean hasNonce() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>required bytes nonce = 4;</code>
       * @return The nonce.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getNonce() {
        return nonce_;
      }
      /**
       * <code>required bytes nonce = 4;</code>
       * @param value The nonce to set.
       * @return This builder for chaining.
       */
      public Builder setNonce(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        nonce_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>required bytes nonce = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearNonce() {
        bitField0_ = (bitField0_ & ~0x00000008);
        nonce_ = getDefaultInstance().getNonce();
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ProvidedStorageLocationProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ProvidedStorageLocationProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ProvidedStorageLocationProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ProvidedStorageLocationProto>() {
      @java.lang.Override
      public ProvidedStorageLocationProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ProvidedStorageLocationProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ProvidedStorageLocationProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
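
  /*
   * Minimal usage sketch for ProvidedStorageLocationProto, using illustrative
   * values only. All four fields are required. The delimited round-trip pairs
   * writeDelimitedTo(), inherited from the protobuf runtime, with the
   * generated parseDelimitedFrom(); each message is length-prefixed so
   * several can share one stream.
   *
   *   ProvidedStorageLocationProto loc = ProvidedStorageLocationProto.newBuilder()
   *       .setPath("/remote/store/file.dat")
   *       .setOffset(0L)
   *       .setLength(4096L)
   *       .setNonce(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(new byte[] {1, 2}))
   *       .build();
   *
   *   java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
   *   loc.writeDelimitedTo(out);
   *   ProvidedStorageLocationProto copy = ProvidedStorageLocationProto.parseDelimitedFrom(
   *       new java.io.ByteArrayInputStream(out.toByteArray()));
   */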

  public interface DatanodeIDProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DatanodeIDProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * IP address
     * </pre>
     *
     * <code>required string ipAddr = 1;</code>
     * @return Whether the ipAddr field is set.
     */
    boolean hasIpAddr();
    /**
     * <pre>
     * IP address
     * </pre>
     *
     * <code>required string ipAddr = 1;</code>
     * @return The ipAddr.
     */
    java.lang.String getIpAddr();
    /**
     * <pre>
     * IP address
     * </pre>
     *
     * <code>required string ipAddr = 1;</code>
     * @return The bytes for ipAddr.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getIpAddrBytes();

    /**
     * <pre>
     * hostname
     * </pre>
     *
     * <code>required string hostName = 2;</code>
     * @return Whether the hostName field is set.
     */
    boolean hasHostName();
    /**
     * <pre>
     * hostname
     * </pre>
     *
     * <code>required string hostName = 2;</code>
     * @return The hostName.
     */
    java.lang.String getHostName();
    /**
     * <pre>
     * hostname
     * </pre>
     *
     * <code>required string hostName = 2;</code>
     * @return The bytes for hostName.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getHostNameBytes();

    /**
     * <pre>
     * UUID assigned to the Datanode. For
     * </pre>
     *
     * <code>required string datanodeUuid = 3;</code>
     * @return Whether the datanodeUuid field is set.
     */
    boolean hasDatanodeUuid();
    /**
     * <pre>
     * UUID assigned to the Datanode. For
     * </pre>
     *
     * <code>required string datanodeUuid = 3;</code>
     * @return The datanodeUuid.
     */
    java.lang.String getDatanodeUuid();
    /**
     * <pre>
     * UUID assigned to the Datanode. For
     * </pre>
     *
     * <code>required string datanodeUuid = 3;</code>
     * @return The bytes for datanodeUuid.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getDatanodeUuidBytes();

    /**
     * <pre>
     * upgraded clusters this is the same
     * as the original StorageID of the
     * Datanode.
     * </pre>
     *
     * <code>required uint32 xferPort = 4;</code>
     * @return Whether the xferPort field is set.
     */
    boolean hasXferPort();
    /**
     * <pre>
     * upgraded clusters this is the same
     * as the original StorageID of the
     * Datanode.
     * </pre>
     *
     * <code>required uint32 xferPort = 4;</code>
     * @return The xferPort.
     */
    int getXferPort();

    /**
     * <pre>
     * datanode http port
     * </pre>
     *
     * <code>required uint32 infoPort = 5;</code>
     * @return Whether the infoPort field is set.
     */
    boolean hasInfoPort();
    /**
     * <pre>
     * datanode http port
     * </pre>
     *
     * <code>required uint32 infoPort = 5;</code>
     * @return The infoPort.
     */
    int getInfoPort();

    /**
     * <pre>
     * ipc server port
     * </pre>
     *
     * <code>required uint32 ipcPort = 6;</code>
     * @return Whether the ipcPort field is set.
     */
    boolean hasIpcPort();
    /**
     * <pre>
     * ipc server port
     * </pre>
     *
     * <code>required uint32 ipcPort = 6;</code>
     * @return The ipcPort.
     */
    int getIpcPort();

    /**
     * <pre>
     * datanode https port
     * </pre>
     *
     * <code>optional uint32 infoSecurePort = 7 [default = 0];</code>
     * @return Whether the infoSecurePort field is set.
     */
    boolean hasInfoSecurePort();
    /**
     * <pre>
     * datanode https port
     * </pre>
     *
     * <code>optional uint32 infoSecurePort = 7 [default = 0];</code>
     * @return The infoSecurePort.
     */
    int getInfoSecurePort();
  }
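
  /*
   * The DatanodeIDProto message defined below carries a datanode's network
   * identity: ipAddr, hostName, datanodeUuid (on upgraded clusters this is the
   * same as the datanode's original StorageID), the data-transfer port
   * (xferPort), the HTTP info port, the IPC port, and an optional HTTPS port
   * (infoSecurePort, default 0). A builder sketch, assuming the generated
   * setters follow the same pattern as the messages above; the addresses and
   * port numbers are illustrative:
   *
   *   DatanodeIDProto dn = DatanodeIDProto.newBuilder()
   *       .setIpAddr("10.0.0.7")
   *       .setHostName("dn1.example.com")
   *       .setDatanodeUuid("c0ffee00-0000-0000-0000-000000000001")
   *       .setXferPort(9866)
   *       .setInfoPort(9864)
   *       .setIpcPort(9867)
   *       .setInfoSecurePort(9865)
   *       .build();
   */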
  /**
   * <pre>
   **
   * Identifies a Datanode
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.DatanodeIDProto}
   */
  public static final class DatanodeIDProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.DatanodeIDProto)
      DatanodeIDProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use DatanodeIDProto.newBuilder() to construct.
    private DatanodeIDProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private DatanodeIDProto() {
      ipAddr_ = "";
      hostName_ = "";
      datanodeUuid_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new DatanodeIDProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder.class);
    }

    private int bitField0_;
    public static final int IPADDR_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object ipAddr_ = "";
    /**
     * <pre>
     * IP address
     * </pre>
     *
     * <code>required string ipAddr = 1;</code>
     * @return Whether the ipAddr field is set.
     */
    @java.lang.Override
    public boolean hasIpAddr() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <pre>
     * IP address
     * </pre>
     *
     * <code>required string ipAddr = 1;</code>
     * @return The ipAddr.
     */
    @java.lang.Override
    public java.lang.String getIpAddr() {
      java.lang.Object ref = ipAddr_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          ipAddr_ = s;
        }
        return s;
      }
    }
    /**
     * <pre>
     * IP address
     * </pre>
     *
     * <code>required string ipAddr = 1;</code>
     * @return The bytes for ipAddr.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getIpAddrBytes() {
      java.lang.Object ref = ipAddr_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        ipAddr_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int HOSTNAME_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object hostName_ = "";
    /**
     * <pre>
     * hostname
     * </pre>
     *
     * <code>required string hostName = 2;</code>
     * @return Whether the hostName field is set.
     */
    @java.lang.Override
    public boolean hasHostName() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <pre>
     * hostname
     * </pre>
     *
     * <code>required string hostName = 2;</code>
     * @return The hostName.
     */
    @java.lang.Override
    public java.lang.String getHostName() {
      java.lang.Object ref = hostName_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          hostName_ = s;
        }
        return s;
      }
    }
    /**
     * <pre>
     * hostname
     * </pre>
     *
     * <code>required string hostName = 2;</code>
     * @return The bytes for hostName.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getHostNameBytes() {
      java.lang.Object ref = hostName_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        hostName_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int DATANODEUUID_FIELD_NUMBER = 3;
    @SuppressWarnings("serial")
    private volatile java.lang.Object datanodeUuid_ = "";
    /**
     * <pre>
     * UUID assigned to the Datanode. For
     * </pre>
     *
     * <code>required string datanodeUuid = 3;</code>
     * @return Whether the datanodeUuid field is set.
     */
    @java.lang.Override
    public boolean hasDatanodeUuid() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <pre>
     * UUID assigned to the Datanode. For
     * </pre>
     *
     * <code>required string datanodeUuid = 3;</code>
     * @return The datanodeUuid.
     */
    @java.lang.Override
    public java.lang.String getDatanodeUuid() {
      java.lang.Object ref = datanodeUuid_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          datanodeUuid_ = s;
        }
        return s;
      }
    }
    /**
     * <pre>
     * UUID assigned to the Datanode. For
     * </pre>
     *
     * <code>required string datanodeUuid = 3;</code>
     * @return The bytes for datanodeUuid.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getDatanodeUuidBytes() {
      java.lang.Object ref = datanodeUuid_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        datanodeUuid_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int XFERPORT_FIELD_NUMBER = 4;
    private int xferPort_ = 0;
    /**
     * <pre>
     * upgraded clusters this is the same
     * as the original StorageID of the
     * Datanode.
     * </pre>
     *
     * <code>required uint32 xferPort = 4;</code>
     * @return Whether the xferPort field is set.
     */
    @java.lang.Override
    public boolean hasXferPort() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <pre>
     * upgraded clusters this is the same
     * as the original StorageID of the
     * Datanode.
     * </pre>
     *
     * <code>required uint32 xferPort = 4;</code>
     * @return The xferPort.
     */
    @java.lang.Override
    public int getXferPort() {
      return xferPort_;
    }

    public static final int INFOPORT_FIELD_NUMBER = 5;
    private int infoPort_ = 0;
    /**
     * <pre>
     * datanode http port
     * </pre>
     *
     * <code>required uint32 infoPort = 5;</code>
     * @return Whether the infoPort field is set.
     */
    @java.lang.Override
    public boolean hasInfoPort() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <pre>
     * datanode http port
     * </pre>
     *
     * <code>required uint32 infoPort = 5;</code>
     * @return The infoPort.
     */
    @java.lang.Override
    public int getInfoPort() {
      return infoPort_;
    }

    public static final int IPCPORT_FIELD_NUMBER = 6;
    private int ipcPort_ = 0;
    /**
     * <pre>
     * ipc server port
     * </pre>
     *
     * <code>required uint32 ipcPort = 6;</code>
     * @return Whether the ipcPort field is set.
     */
    @java.lang.Override
    public boolean hasIpcPort() {
      return ((bitField0_ & 0x00000020) != 0);
    }
    /**
     * <pre>
     * ipc server port
     * </pre>
     *
     * <code>required uint32 ipcPort = 6;</code>
     * @return The ipcPort.
     */
    @java.lang.Override
    public int getIpcPort() {
      return ipcPort_;
    }

    public static final int INFOSECUREPORT_FIELD_NUMBER = 7;
    private int infoSecurePort_ = 0;
    /**
     * <pre>
     * datanode https port
     * </pre>
     *
     * <code>optional uint32 infoSecurePort = 7 [default = 0];</code>
     * @return Whether the infoSecurePort field is set.
     */
    @java.lang.Override
    public boolean hasInfoSecurePort() {
      return ((bitField0_ & 0x00000040) != 0);
    }
    /**
     * <pre>
     * datanode https port
     * </pre>
     *
     * <code>optional uint32 infoSecurePort = 7 [default = 0];</code>
     * @return The infoSecurePort.
     */
    @java.lang.Override
    public int getInfoSecurePort() {
      return infoSecurePort_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasIpAddr()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasHostName()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasDatanodeUuid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasXferPort()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasInfoPort()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasIpcPort()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, ipAddr_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, hostName_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, datanodeUuid_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeUInt32(4, xferPort_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeUInt32(5, infoPort_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        output.writeUInt32(6, ipcPort_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        output.writeUInt32(7, infoSecurePort_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, ipAddr_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, hostName_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, datanodeUuid_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(4, xferPort_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(5, infoPort_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(6, ipcPort_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(7, infoSecurePort_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto) obj;

      if (hasIpAddr() != other.hasIpAddr()) return false;
      if (hasIpAddr()) {
        if (!getIpAddr()
            .equals(other.getIpAddr())) return false;
      }
      if (hasHostName() != other.hasHostName()) return false;
      if (hasHostName()) {
        if (!getHostName()
            .equals(other.getHostName())) return false;
      }
      if (hasDatanodeUuid() != other.hasDatanodeUuid()) return false;
      if (hasDatanodeUuid()) {
        if (!getDatanodeUuid()
            .equals(other.getDatanodeUuid())) return false;
      }
      if (hasXferPort() != other.hasXferPort()) return false;
      if (hasXferPort()) {
        if (getXferPort()
            != other.getXferPort()) return false;
      }
      if (hasInfoPort() != other.hasInfoPort()) return false;
      if (hasInfoPort()) {
        if (getInfoPort()
            != other.getInfoPort()) return false;
      }
      if (hasIpcPort() != other.hasIpcPort()) return false;
      if (hasIpcPort()) {
        if (getIpcPort()
            != other.getIpcPort()) return false;
      }
      if (hasInfoSecurePort() != other.hasInfoSecurePort()) return false;
      if (hasInfoSecurePort()) {
        if (getInfoSecurePort()
            != other.getInfoSecurePort()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasIpAddr()) {
        hash = (37 * hash) + IPADDR_FIELD_NUMBER;
        hash = (53 * hash) + getIpAddr().hashCode();
      }
      if (hasHostName()) {
        hash = (37 * hash) + HOSTNAME_FIELD_NUMBER;
        hash = (53 * hash) + getHostName().hashCode();
      }
      if (hasDatanodeUuid()) {
        hash = (37 * hash) + DATANODEUUID_FIELD_NUMBER;
        hash = (53 * hash) + getDatanodeUuid().hashCode();
      }
      if (hasXferPort()) {
        hash = (37 * hash) + XFERPORT_FIELD_NUMBER;
        hash = (53 * hash) + getXferPort();
      }
      if (hasInfoPort()) {
        hash = (37 * hash) + INFOPORT_FIELD_NUMBER;
        hash = (53 * hash) + getInfoPort();
      }
      if (hasIpcPort()) {
        hash = (37 * hash) + IPCPORT_FIELD_NUMBER;
        hash = (53 * hash) + getIpcPort();
      }
      if (hasInfoSecurePort()) {
        hash = (37 * hash) + INFOSECUREPORT_FIELD_NUMBER;
        hash = (53 * hash) + getInfoSecurePort();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
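    /*
     * Usage sketch, not generated code: parsing a DatanodeIDProto back from raw
     * bytes. "wireBytes" is a hypothetical byte[] produced by toByteArray();
     * malformed or truncated input surfaces as InvalidProtocolBufferException.
     *
     *   try {
     *     DatanodeIDProto id = DatanodeIDProto.parseFrom(wireBytes);
     *     int xferPort = id.getXferPort();
     *   } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
     *     // handle corrupt input
     *   }
     */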

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
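    /*
     * Usage sketch with illustrative values (the names and ports below are not
     * taken from this file): all six required fields must be set before build()
     * succeeds; infoSecurePort is optional and defaults to 0.
     *
     *   DatanodeIDProto id = DatanodeIDProto.newBuilder()
     *       .setIpAddr("10.0.0.1")
     *       .setHostName("dn1.example.com")
     *       .setDatanodeUuid("0f5c7a2e-uuid")
     *       .setXferPort(9866)
     *       .setInfoPort(9864)
     *       .setIpcPort(9867)
     *       .build();
     *   byte[] wireBytes = id.toByteArray();
     */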
    /**
     * <pre>
     **
     * Identifies a Datanode
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.DatanodeIDProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DatanodeIDProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        ipAddr_ = "";
        hostName_ = "";
        datanodeUuid_ = "";
        xferPort_ = 0;
        infoPort_ = 0;
        ipcPort_ = 0;
        infoSecurePort_ = 0;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeIDProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.ipAddr_ = ipAddr_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.hostName_ = hostName_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.datanodeUuid_ = datanodeUuid_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.xferPort_ = xferPort_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.infoPort_ = infoPort_;
          to_bitField0_ |= 0x00000010;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.ipcPort_ = ipcPort_;
          to_bitField0_ |= 0x00000020;
        }
        if (((from_bitField0_ & 0x00000040) != 0)) {
          result.infoSecurePort_ = infoSecurePort_;
          to_bitField0_ |= 0x00000040;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) return this;
        if (other.hasIpAddr()) {
          ipAddr_ = other.ipAddr_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasHostName()) {
          hostName_ = other.hostName_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        if (other.hasDatanodeUuid()) {
          datanodeUuid_ = other.datanodeUuid_;
          bitField0_ |= 0x00000004;
          onChanged();
        }
        if (other.hasXferPort()) {
          setXferPort(other.getXferPort());
        }
        if (other.hasInfoPort()) {
          setInfoPort(other.getInfoPort());
        }
        if (other.hasIpcPort()) {
          setIpcPort(other.getIpcPort());
        }
        if (other.hasInfoSecurePort()) {
          setInfoSecurePort(other.getInfoSecurePort());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasIpAddr()) {
          return false;
        }
        if (!hasHostName()) {
          return false;
        }
        if (!hasDatanodeUuid()) {
          return false;
        }
        if (!hasXferPort()) {
          return false;
        }
        if (!hasInfoPort()) {
          return false;
        }
        if (!hasIpcPort()) {
          return false;
        }
        return true;
      }
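      /*
       * Informal note: build() applies the equivalent required-field check to the
       * finished message and throws UninitializedMessageException when it fails,
       * while buildPartial() skips it. A sketch, assuming nothing else was set:
       *
       *   DatanodeIDProto.Builder b = DatanodeIDProto.newBuilder().setIpAddr("10.0.0.1");
       *   boolean ready = b.isInitialized();           // false: five required fields missing
       *   DatanodeIDProto partial = b.buildPartial();  // permitted; partial.isInitialized() is false
       */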

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                ipAddr_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                hostName_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 26: {
                datanodeUuid_ = input.readBytes();
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              case 32: {
                xferPort_ = input.readUInt32();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
              case 40: {
                infoPort_ = input.readUInt32();
                bitField0_ |= 0x00000010;
                break;
              } // case 40
              case 48: {
                ipcPort_ = input.readUInt32();
                bitField0_ |= 0x00000020;
                break;
              } // case 48
              case 56: {
                infoSecurePort_ = input.readUInt32();
                bitField0_ |= 0x00000040;
                break;
              } // case 56
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object ipAddr_ = "";
      /**
       * <pre>
       * IP address
       * </pre>
       *
       * <code>required string ipAddr = 1;</code>
       * @return Whether the ipAddr field is set.
       */
      public boolean hasIpAddr() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <pre>
       * IP address
       * </pre>
       *
       * <code>required string ipAddr = 1;</code>
       * @return The ipAddr.
       */
      public java.lang.String getIpAddr() {
        java.lang.Object ref = ipAddr_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            ipAddr_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <pre>
       * IP address
       * </pre>
       *
       * <code>required string ipAddr = 1;</code>
       * @return The bytes for ipAddr.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getIpAddrBytes() {
        java.lang.Object ref = ipAddr_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          ipAddr_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <pre>
       * IP address
       * </pre>
       *
       * <code>required string ipAddr = 1;</code>
       * @param value The ipAddr to set.
       * @return This builder for chaining.
       */
      public Builder setIpAddr(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ipAddr_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * IP address
       * </pre>
       *
       * <code>required string ipAddr = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearIpAddr() {
        ipAddr_ = getDefaultInstance().getIpAddr();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <pre>
       * IP address
       * </pre>
       *
       * <code>required string ipAddr = 1;</code>
       * @param value The bytes for ipAddr to set.
       * @return This builder for chaining.
       */
      public Builder setIpAddrBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        ipAddr_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private java.lang.Object hostName_ = "";
      /**
       * <pre>
       * hostname
       * </pre>
       *
       * <code>required string hostName = 2;</code>
       * @return Whether the hostName field is set.
       */
      public boolean hasHostName() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       * hostname
       * </pre>
       *
       * <code>required string hostName = 2;</code>
       * @return The hostName.
       */
      public java.lang.String getHostName() {
        java.lang.Object ref = hostName_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            hostName_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <pre>
       * hostname
       * </pre>
       *
       * <code>required string hostName = 2;</code>
       * @return The bytes for hostName.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getHostNameBytes() {
        java.lang.Object ref = hostName_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          hostName_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <pre>
       * hostname
       * </pre>
       *
       * <code>required string hostName = 2;</code>
       * @param value The hostName to set.
       * @return This builder for chaining.
       */
      public Builder setHostName(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        hostName_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * hostname
       * </pre>
       *
       * <code>required string hostName = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearHostName() {
        hostName_ = getDefaultInstance().getHostName();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <pre>
       * hostname
       * </pre>
       *
       * <code>required string hostName = 2;</code>
       * @param value The bytes for hostName to set.
       * @return This builder for chaining.
       */
      public Builder setHostNameBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        hostName_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }

      private java.lang.Object datanodeUuid_ = "";
      /**
       * <pre>
       * UUID assigned to the Datanode. For upgraded clusters this is the
       * same as the original StorageID of the Datanode.
       * </pre>
       *
       * <code>required string datanodeUuid = 3;</code>
       * @return Whether the datanodeUuid field is set.
       */
      public boolean hasDatanodeUuid() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <pre>
       * UUID assigned to the Datanode. For upgraded clusters this is the
       * same as the original StorageID of the Datanode.
       * </pre>
       *
       * <code>required string datanodeUuid = 3;</code>
       * @return The datanodeUuid.
       */
      public java.lang.String getDatanodeUuid() {
        java.lang.Object ref = datanodeUuid_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            datanodeUuid_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <pre>
       * UUID assigned to the Datanode. For upgraded clusters this is the
       * same as the original StorageID of the Datanode.
       * </pre>
       *
       * <code>required string datanodeUuid = 3;</code>
       * @return The bytes for datanodeUuid.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getDatanodeUuidBytes() {
        java.lang.Object ref = datanodeUuid_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          datanodeUuid_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <pre>
       * UUID assigned to the Datanode. For upgraded clusters this is the
       * same as the original StorageID of the Datanode.
       * </pre>
       *
       * <code>required string datanodeUuid = 3;</code>
       * @param value The datanodeUuid to set.
       * @return This builder for chaining.
       */
      public Builder setDatanodeUuid(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        datanodeUuid_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * UUID assigned to the Datanode. For upgraded clusters this is the
       * same as the original StorageID of the Datanode.
       * </pre>
       *
       * <code>required string datanodeUuid = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearDatanodeUuid() {
        datanodeUuid_ = getDefaultInstance().getDatanodeUuid();
        bitField0_ = (bitField0_ & ~0x00000004);
        onChanged();
        return this;
      }
      /**
       * <pre>
       * UUID assigned to the Datanode. For upgraded clusters this is the
       * same as the original StorageID of the Datanode.
       * </pre>
       *
       * <code>required string datanodeUuid = 3;</code>
       * @param value The bytes for datanodeUuid to set.
       * @return This builder for chaining.
       */
      public Builder setDatanodeUuidBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        datanodeUuid_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }

      private int xferPort_ ;
      /**
       * <pre>
       * data streaming port
       * </pre>
       *
       * <code>required uint32 xferPort = 4;</code>
       * @return Whether the xferPort field is set.
       */
      @java.lang.Override
      public boolean hasXferPort() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <pre>
       * data streaming port
       * </pre>
       *
       * <code>required uint32 xferPort = 4;</code>
       * @return The xferPort.
       */
      @java.lang.Override
      public int getXferPort() {
        return xferPort_;
      }
      /**
       * <pre>
       * data streaming port
       * </pre>
       *
       * <code>required uint32 xferPort = 4;</code>
       * @param value The xferPort to set.
       * @return This builder for chaining.
       */
      public Builder setXferPort(int value) {

        xferPort_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * data streaming port
       * </pre>
       *
       * <code>required uint32 xferPort = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearXferPort() {
        bitField0_ = (bitField0_ & ~0x00000008);
        xferPort_ = 0;
        onChanged();
        return this;
      }

      private int infoPort_ ;
      /**
       * <pre>
       * datanode http port
       * </pre>
       *
       * <code>required uint32 infoPort = 5;</code>
       * @return Whether the infoPort field is set.
       */
      @java.lang.Override
      public boolean hasInfoPort() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <pre>
       * datanode http port
       * </pre>
       *
       * <code>required uint32 infoPort = 5;</code>
       * @return The infoPort.
       */
      @java.lang.Override
      public int getInfoPort() {
        return infoPort_;
      }
      /**
       * <pre>
       * datanode http port
       * </pre>
       *
       * <code>required uint32 infoPort = 5;</code>
       * @param value The infoPort to set.
       * @return This builder for chaining.
       */
      public Builder setInfoPort(int value) {

        infoPort_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * datanode http port
       * </pre>
       *
       * <code>required uint32 infoPort = 5;</code>
       * @return This builder for chaining.
       */
      public Builder clearInfoPort() {
        bitField0_ = (bitField0_ & ~0x00000010);
        infoPort_ = 0;
        onChanged();
        return this;
      }

      private int ipcPort_ ;
      /**
       * <pre>
       * ipc server port
       * </pre>
       *
       * <code>required uint32 ipcPort = 6;</code>
       * @return Whether the ipcPort field is set.
       */
      @java.lang.Override
      public boolean hasIpcPort() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <pre>
       * ipc server port
       * </pre>
       *
       * <code>required uint32 ipcPort = 6;</code>
       * @return The ipcPort.
       */
      @java.lang.Override
      public int getIpcPort() {
        return ipcPort_;
      }
      /**
       * <pre>
       * ipc server port
       * </pre>
       *
       * <code>required uint32 ipcPort = 6;</code>
       * @param value The ipcPort to set.
       * @return This builder for chaining.
       */
      public Builder setIpcPort(int value) {

        ipcPort_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * ipc server port
       * </pre>
       *
       * <code>required uint32 ipcPort = 6;</code>
       * @return This builder for chaining.
       */
      public Builder clearIpcPort() {
        bitField0_ = (bitField0_ & ~0x00000020);
        ipcPort_ = 0;
        onChanged();
        return this;
      }

      private int infoSecurePort_ ;
      /**
       * <pre>
       * datanode https port
       * </pre>
       *
       * <code>optional uint32 infoSecurePort = 7 [default = 0];</code>
       * @return Whether the infoSecurePort field is set.
       */
      @java.lang.Override
      public boolean hasInfoSecurePort() {
        return ((bitField0_ & 0x00000040) != 0);
      }
      /**
       * <pre>
       * datanode https port
       * </pre>
       *
       * <code>optional uint32 infoSecurePort = 7 [default = 0];</code>
       * @return The infoSecurePort.
       */
      @java.lang.Override
      public int getInfoSecurePort() {
        return infoSecurePort_;
      }
      /**
       * <pre>
       * datanode https port
       * </pre>
       *
       * <code>optional uint32 infoSecurePort = 7 [default = 0];</code>
       * @param value The infoSecurePort to set.
       * @return This builder for chaining.
       */
      public Builder setInfoSecurePort(int value) {

        infoSecurePort_ = value;
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * datanode https port
       * </pre>
       *
       * <code>optional uint32 infoSecurePort = 7 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearInfoSecurePort() {
        bitField0_ = (bitField0_ & ~0x00000040);
        infoSecurePort_ = 0;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeIDProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeIDProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeIDProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DatanodeIDProto>() {
      @java.lang.Override
      public DatanodeIDProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeIDProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeIDProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
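  /*
   * Usage sketch ("existing" is a hypothetical, fully built DatanodeIDProto):
   * messages are immutable, so toBuilder() is the idiomatic way to derive a
   * modified copy.
   *
   *   HdfsProtos.DatanodeIDProto updated = existing.toBuilder()
   *       .setInfoSecurePort(9871)   // illustrative value
   *       .build();
   *   // "existing" is untouched; "updated" differs only in infoSecurePort.
   */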

  public interface DatanodeLocalInfoProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DatanodeLocalInfoProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required string softwareVersion = 1;</code>
     * @return Whether the softwareVersion field is set.
     */
    boolean hasSoftwareVersion();
    /**
     * <code>required string softwareVersion = 1;</code>
     * @return The softwareVersion.
     */
    java.lang.String getSoftwareVersion();
    /**
     * <code>required string softwareVersion = 1;</code>
     * @return The bytes for softwareVersion.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getSoftwareVersionBytes();

    /**
     * <code>required string configVersion = 2;</code>
     * @return Whether the configVersion field is set.
     */
    boolean hasConfigVersion();
    /**
     * <code>required string configVersion = 2;</code>
     * @return The configVersion.
     */
    java.lang.String getConfigVersion();
    /**
     * <code>required string configVersion = 2;</code>
     * @return The bytes for configVersion.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getConfigVersionBytes();

    /**
     * <code>required uint64 uptime = 3;</code>
     * @return Whether the uptime field is set.
     */
    boolean hasUptime();
    /**
     * <code>required uint64 uptime = 3;</code>
     * @return The uptime.
     */
    long getUptime();
  }
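  /*
   * Usage sketch (the helper below is hypothetical, not generated): the
   * *OrBuilder interface is implemented by both DatanodeLocalInfoProto and its
   * Builder, so read-only code can accept either without committing to one.
   *
   *   static String describe(HdfsProtos.DatanodeLocalInfoProtoOrBuilder info) {
   *     return info.getSoftwareVersion() + " (uptime=" + info.getUptime() + ")";
   *   }
   */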
  /**
   * <pre>
   **
   * Datanode local information
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.DatanodeLocalInfoProto}
   */
  public static final class DatanodeLocalInfoProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.DatanodeLocalInfoProto)
      DatanodeLocalInfoProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use DatanodeLocalInfoProto.newBuilder() to construct.
    private DatanodeLocalInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private DatanodeLocalInfoProto() {
      softwareVersion_ = "";
      configVersion_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new DatanodeLocalInfoProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder.class);
    }

    private int bitField0_;
    public static final int SOFTWAREVERSION_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object softwareVersion_ = "";
    /**
     * <code>required string softwareVersion = 1;</code>
     * @return Whether the softwareVersion field is set.
     */
    @java.lang.Override
    public boolean hasSoftwareVersion() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required string softwareVersion = 1;</code>
     * @return The softwareVersion.
     */
    @java.lang.Override
    public java.lang.String getSoftwareVersion() {
      java.lang.Object ref = softwareVersion_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          softwareVersion_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string softwareVersion = 1;</code>
     * @return The bytes for softwareVersion.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getSoftwareVersionBytes() {
      java.lang.Object ref = softwareVersion_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        softwareVersion_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int CONFIGVERSION_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object configVersion_ = "";
    /**
     * <code>required string configVersion = 2;</code>
     * @return Whether the configVersion field is set.
     */
    @java.lang.Override
    public boolean hasConfigVersion() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required string configVersion = 2;</code>
     * @return The configVersion.
     */
    @java.lang.Override
    public java.lang.String getConfigVersion() {
      java.lang.Object ref = configVersion_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          configVersion_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string configVersion = 2;</code>
     * @return The bytes for configVersion.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getConfigVersionBytes() {
      java.lang.Object ref = configVersion_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        configVersion_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int UPTIME_FIELD_NUMBER = 3;
    private long uptime_ = 0L;
    /**
     * <code>required uint64 uptime = 3;</code>
     * @return Whether the uptime field is set.
     */
    @java.lang.Override
    public boolean hasUptime() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required uint64 uptime = 3;</code>
     * @return The uptime.
     */
    @java.lang.Override
    public long getUptime() {
      return uptime_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasSoftwareVersion()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasConfigVersion()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasUptime()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, softwareVersion_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, configVersion_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeUInt64(3, uptime_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, softwareVersion_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, configVersion_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(3, uptime_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto) obj;

      if (hasSoftwareVersion() != other.hasSoftwareVersion()) return false;
      if (hasSoftwareVersion()) {
        if (!getSoftwareVersion()
            .equals(other.getSoftwareVersion())) return false;
      }
      if (hasConfigVersion() != other.hasConfigVersion()) return false;
      if (hasConfigVersion()) {
        if (!getConfigVersion()
            .equals(other.getConfigVersion())) return false;
      }
      if (hasUptime() != other.hasUptime()) return false;
      if (hasUptime()) {
        if (getUptime()
            != other.getUptime()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasSoftwareVersion()) {
        hash = (37 * hash) + SOFTWAREVERSION_FIELD_NUMBER;
        hash = (53 * hash) + getSoftwareVersion().hashCode();
      }
      if (hasConfigVersion()) {
        hash = (37 * hash) + CONFIGVERSION_FIELD_NUMBER;
        hash = (53 * hash) + getConfigVersion().hashCode();
      }
      if (hasUptime()) {
        hash = (37 * hash) + UPTIME_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getUptime());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
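    /*
     * Usage sketch ("info", "out" and "in" are hypothetical): the delimited
     * variants length-prefix each message so several can share one stream;
     * parseDelimitedFrom returns null at a clean end of stream.
     *
     *   info.writeDelimitedTo(out);
     *   DatanodeLocalInfoProto next = DatanodeLocalInfoProto.parseDelimitedFrom(in);
     */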

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
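    /*
     * Usage sketch with illustrative values: softwareVersion, configVersion and
     * uptime are all required, so each must be set before build().
     *
     *   DatanodeLocalInfoProto info = DatanodeLocalInfoProto.newBuilder()
     *       .setSoftwareVersion("3.4.0")
     *       .setConfigVersion("config-v1")
     *       .setUptime(86400L)
     *       .build();
     */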
    /**
     * <pre>
     **
     * Datanode local information
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.DatanodeLocalInfoProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DatanodeLocalInfoProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        softwareVersion_ = "";
        configVersion_ = "";
        uptime_ = 0L;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.softwareVersion_ = softwareVersion_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.configVersion_ = configVersion_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.uptime_ = uptime_;
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto.getDefaultInstance()) return this;
        if (other.hasSoftwareVersion()) {
          softwareVersion_ = other.softwareVersion_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasConfigVersion()) {
          configVersion_ = other.configVersion_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        if (other.hasUptime()) {
          setUptime(other.getUptime());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasSoftwareVersion()) {
          return false;
        }
        if (!hasConfigVersion()) {
          return false;
        }
        if (!hasUptime()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                softwareVersion_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                configVersion_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 24: {
                uptime_ = input.readUInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
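      // Presence bits for this builder: 0x01 = softwareVersion, 0x02 = configVersion,
      // 0x04 = uptime.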
      private int bitField0_;

      private java.lang.Object softwareVersion_ = "";
      /**
       * <code>required string softwareVersion = 1;</code>
       * @return Whether the softwareVersion field is set.
       */
      public boolean hasSoftwareVersion() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required string softwareVersion = 1;</code>
       * @return The softwareVersion.
       */
      public java.lang.String getSoftwareVersion() {
        java.lang.Object ref = softwareVersion_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            softwareVersion_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string softwareVersion = 1;</code>
       * @return The bytes for softwareVersion.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getSoftwareVersionBytes() {
        java.lang.Object ref = softwareVersion_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          softwareVersion_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string softwareVersion = 1;</code>
       * @param value The softwareVersion to set.
       * @return This builder for chaining.
       */
      public Builder setSoftwareVersion(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        softwareVersion_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required string softwareVersion = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearSoftwareVersion() {
        softwareVersion_ = getDefaultInstance().getSoftwareVersion();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>required string softwareVersion = 1;</code>
       * @param value The bytes for softwareVersion to set.
       * @return This builder for chaining.
       */
      public Builder setSoftwareVersionBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        softwareVersion_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private java.lang.Object configVersion_ = "";
      /**
       * <code>required string configVersion = 2;</code>
       * @return Whether the configVersion field is set.
       */
      public boolean hasConfigVersion() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required string configVersion = 2;</code>
       * @return The configVersion.
       */
      public java.lang.String getConfigVersion() {
        java.lang.Object ref = configVersion_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            configVersion_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string configVersion = 2;</code>
       * @return The bytes for configVersion.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getConfigVersionBytes() {
        java.lang.Object ref = configVersion_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          configVersion_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string configVersion = 2;</code>
       * @param value The configVersion to set.
       * @return This builder for chaining.
       */
      public Builder setConfigVersion(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        configVersion_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required string configVersion = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearConfigVersion() {
        configVersion_ = getDefaultInstance().getConfigVersion();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <code>required string configVersion = 2;</code>
       * @param value The bytes for configVersion to set.
       * @return This builder for chaining.
       */
      public Builder setConfigVersionBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        configVersion_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }

      private long uptime_ ;
      /**
       * <code>required uint64 uptime = 3;</code>
       * @return Whether the uptime field is set.
       */
      @java.lang.Override
      public boolean hasUptime() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required uint64 uptime = 3;</code>
       * @return The uptime.
       */
      @java.lang.Override
      public long getUptime() {
        return uptime_;
      }
      /**
       * <code>required uint64 uptime = 3;</code>
       * @param value The uptime to set.
       * @return This builder for chaining.
       */
      public Builder setUptime(long value) {

        uptime_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 uptime = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearUptime() {
        bitField0_ = (bitField0_ & ~0x00000004);
        uptime_ = 0L;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeLocalInfoProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeLocalInfoProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeLocalInfoProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DatanodeLocalInfoProto>() {
      @java.lang.Override
      public DatanodeLocalInfoProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeLocalInfoProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeLocalInfoProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
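
  // Illustrative helper (not generated; the method name is hypothetical): a minimal
  // sketch of building and round-tripping a DatanodeLocalInfoProto. All three fields
  // are proto2 "required", so build() throws if any of them is left unset; the
  // placeholder values carry no meaning.
  private static DatanodeLocalInfoProto exampleDatanodeLocalInfoRoundTrip()
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    DatanodeLocalInfoProto info = DatanodeLocalInfoProto.newBuilder()
        .setSoftwareVersion("3.4.0")   // field 1: required string
        .setConfigVersion("config-1")  // field 2: required string
        .setUptime(86400L)             // field 3: required uint64
        .build();
    // Serialize and parse back; parseFrom(byte[]) throws InvalidProtocolBufferException
    // if the bytes are malformed or a required field is missing.
    return DatanodeLocalInfoProto.parseFrom(info.toByteArray());
  }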

  public interface DatanodeVolumeInfoProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DatanodeVolumeInfoProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required string path = 1;</code>
     * @return Whether the path field is set.
     */
    boolean hasPath();
    /**
     * <code>required string path = 1;</code>
     * @return The path.
     */
    java.lang.String getPath();
    /**
     * <code>required string path = 1;</code>
     * @return The bytes for path.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getPathBytes();

    /**
     * <code>required .hadoop.hdfs.StorageTypeProto storageType = 2;</code>
     * @return Whether the storageType field is set.
     */
    boolean hasStorageType();
    /**
     * <code>required .hadoop.hdfs.StorageTypeProto storageType = 2;</code>
     * @return The storageType.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType();

    /**
     * <code>required uint64 usedSpace = 3;</code>
     * @return Whether the usedSpace field is set.
     */
    boolean hasUsedSpace();
    /**
     * <code>required uint64 usedSpace = 3;</code>
     * @return The usedSpace.
     */
    long getUsedSpace();

    /**
     * <code>required uint64 freeSpace = 4;</code>
     * @return Whether the freeSpace field is set.
     */
    boolean hasFreeSpace();
    /**
     * <code>required uint64 freeSpace = 4;</code>
     * @return The freeSpace.
     */
    long getFreeSpace();

    /**
     * <code>required uint64 reservedSpace = 5;</code>
     * @return Whether the reservedSpace field is set.
     */
    boolean hasReservedSpace();
    /**
     * <code>required uint64 reservedSpace = 5;</code>
     * @return The reservedSpace.
     */
    long getReservedSpace();

    /**
     * <code>required uint64 reservedSpaceForReplicas = 6;</code>
     * @return Whether the reservedSpaceForReplicas field is set.
     */
    boolean hasReservedSpaceForReplicas();
    /**
     * <code>required uint64 reservedSpaceForReplicas = 6;</code>
     * @return The reservedSpaceForReplicas.
     */
    long getReservedSpaceForReplicas();

    /**
     * <code>required uint64 numBlocks = 7;</code>
     * @return Whether the numBlocks field is set.
     */
    boolean hasNumBlocks();
    /**
     * <code>required uint64 numBlocks = 7;</code>
     * @return The numBlocks.
     */
    long getNumBlocks();
  }
  /**
   * <pre>
   **
   * Datanode volume information
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.DatanodeVolumeInfoProto}
   */
  public static final class DatanodeVolumeInfoProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.DatanodeVolumeInfoProto)
      DatanodeVolumeInfoProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use DatanodeVolumeInfoProto.newBuilder() to construct.
    private DatanodeVolumeInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private DatanodeVolumeInfoProto() {
      path_ = "";
      storageType_ = 1;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new DatanodeVolumeInfoProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder.class);
    }

    private int bitField0_;
    public static final int PATH_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object path_ = "";
    /**
     * <code>required string path = 1;</code>
     * @return Whether the path field is set.
     */
    @java.lang.Override
    public boolean hasPath() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required string path = 1;</code>
     * @return The path.
     */
    @java.lang.Override
    public java.lang.String getPath() {
      java.lang.Object ref = path_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          path_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string path = 1;</code>
     * @return The bytes for path.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getPathBytes() {
      java.lang.Object ref = path_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        path_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int STORAGETYPE_FIELD_NUMBER = 2;
    private int storageType_ = 1;
    /**
     * <code>required .hadoop.hdfs.StorageTypeProto storageType = 2;</code>
     * @return Whether the storageType field is set.
     */
    @java.lang.Override public boolean hasStorageType() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.StorageTypeProto storageType = 2;</code>
     * @return The storageType.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(storageType_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
    }

    public static final int USEDSPACE_FIELD_NUMBER = 3;
    private long usedSpace_ = 0L;
    /**
     * <code>required uint64 usedSpace = 3;</code>
     * @return Whether the usedSpace field is set.
     */
    @java.lang.Override
    public boolean hasUsedSpace() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required uint64 usedSpace = 3;</code>
     * @return The usedSpace.
     */
    @java.lang.Override
    public long getUsedSpace() {
      return usedSpace_;
    }

    public static final int FREESPACE_FIELD_NUMBER = 4;
    private long freeSpace_ = 0L;
    /**
     * <code>required uint64 freeSpace = 4;</code>
     * @return Whether the freeSpace field is set.
     */
    @java.lang.Override
    public boolean hasFreeSpace() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>required uint64 freeSpace = 4;</code>
     * @return The freeSpace.
     */
    @java.lang.Override
    public long getFreeSpace() {
      return freeSpace_;
    }

    public static final int RESERVEDSPACE_FIELD_NUMBER = 5;
    private long reservedSpace_ = 0L;
    /**
     * <code>required uint64 reservedSpace = 5;</code>
     * @return Whether the reservedSpace field is set.
     */
    @java.lang.Override
    public boolean hasReservedSpace() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>required uint64 reservedSpace = 5;</code>
     * @return The reservedSpace.
     */
    @java.lang.Override
    public long getReservedSpace() {
      return reservedSpace_;
    }

    public static final int RESERVEDSPACEFORREPLICAS_FIELD_NUMBER = 6;
    private long reservedSpaceForReplicas_ = 0L;
    /**
     * <code>required uint64 reservedSpaceForReplicas = 6;</code>
     * @return Whether the reservedSpaceForReplicas field is set.
     */
    @java.lang.Override
    public boolean hasReservedSpaceForReplicas() {
      return ((bitField0_ & 0x00000020) != 0);
    }
    /**
     * <code>required uint64 reservedSpaceForReplicas = 6;</code>
     * @return The reservedSpaceForReplicas.
     */
    @java.lang.Override
    public long getReservedSpaceForReplicas() {
      return reservedSpaceForReplicas_;
    }

    public static final int NUMBLOCKS_FIELD_NUMBER = 7;
    private long numBlocks_ = 0L;
    /**
     * <code>required uint64 numBlocks = 7;</code>
     * @return Whether the numBlocks field is set.
     */
    @java.lang.Override
    public boolean hasNumBlocks() {
      return ((bitField0_ & 0x00000040) != 0);
    }
    /**
     * <code>required uint64 numBlocks = 7;</code>
     * @return The numBlocks.
     */
    @java.lang.Override
    public long getNumBlocks() {
      return numBlocks_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasPath()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasStorageType()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasUsedSpace()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasFreeSpace()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasReservedSpace()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasReservedSpaceForReplicas()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasNumBlocks()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, path_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeEnum(2, storageType_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeUInt64(3, usedSpace_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeUInt64(4, freeSpace_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeUInt64(5, reservedSpace_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        output.writeUInt64(6, reservedSpaceForReplicas_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        output.writeUInt64(7, numBlocks_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, path_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(2, storageType_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(3, usedSpace_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(4, freeSpace_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(5, reservedSpace_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(6, reservedSpaceForReplicas_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(7, numBlocks_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto) obj;

      if (hasPath() != other.hasPath()) return false;
      if (hasPath()) {
        if (!getPath()
            .equals(other.getPath())) return false;
      }
      if (hasStorageType() != other.hasStorageType()) return false;
      if (hasStorageType()) {
        if (storageType_ != other.storageType_) return false;
      }
      if (hasUsedSpace() != other.hasUsedSpace()) return false;
      if (hasUsedSpace()) {
        if (getUsedSpace()
            != other.getUsedSpace()) return false;
      }
      if (hasFreeSpace() != other.hasFreeSpace()) return false;
      if (hasFreeSpace()) {
        if (getFreeSpace()
            != other.getFreeSpace()) return false;
      }
      if (hasReservedSpace() != other.hasReservedSpace()) return false;
      if (hasReservedSpace()) {
        if (getReservedSpace()
            != other.getReservedSpace()) return false;
      }
      if (hasReservedSpaceForReplicas() != other.hasReservedSpaceForReplicas()) return false;
      if (hasReservedSpaceForReplicas()) {
        if (getReservedSpaceForReplicas()
            != other.getReservedSpaceForReplicas()) return false;
      }
      if (hasNumBlocks() != other.hasNumBlocks()) return false;
      if (hasNumBlocks()) {
        if (getNumBlocks()
            != other.getNumBlocks()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasPath()) {
        hash = (37 * hash) + PATH_FIELD_NUMBER;
        hash = (53 * hash) + getPath().hashCode();
      }
      if (hasStorageType()) {
        hash = (37 * hash) + STORAGETYPE_FIELD_NUMBER;
        hash = (53 * hash) + storageType_;
      }
      if (hasUsedSpace()) {
        hash = (37 * hash) + USEDSPACE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getUsedSpace());
      }
      if (hasFreeSpace()) {
        hash = (37 * hash) + FREESPACE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getFreeSpace());
      }
      if (hasReservedSpace()) {
        hash = (37 * hash) + RESERVEDSPACE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getReservedSpace());
      }
      if (hasReservedSpaceForReplicas()) {
        hash = (37 * hash) + RESERVEDSPACEFORREPLICAS_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getReservedSpaceForReplicas());
      }
      if (hasNumBlocks()) {
        hash = (37 * hash) + NUMBLOCKS_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getNumBlocks());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Datanode volume information
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.DatanodeVolumeInfoProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DatanodeVolumeInfoProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        path_ = "";
        storageType_ = 1;
        usedSpace_ = 0L;
        freeSpace_ = 0L;
        reservedSpace_ = 0L;
        reservedSpaceForReplicas_ = 0L;
        numBlocks_ = 0L;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto result) {
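        // Copy only the fields whose presence bit is set in the builder; bits
        // 0x01..0x40 map, in order, to path, storageType, usedSpace, freeSpace,
        // reservedSpace, reservedSpaceForReplicas and numBlocks (fields 1-7).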
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.path_ = path_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.storageType_ = storageType_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.usedSpace_ = usedSpace_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.freeSpace_ = freeSpace_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.reservedSpace_ = reservedSpace_;
          to_bitField0_ |= 0x00000010;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.reservedSpaceForReplicas_ = reservedSpaceForReplicas_;
          to_bitField0_ |= 0x00000020;
        }
        if (((from_bitField0_ & 0x00000040) != 0)) {
          result.numBlocks_ = numBlocks_;
          to_bitField0_ |= 0x00000040;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto.getDefaultInstance()) return this;
        if (other.hasPath()) {
          path_ = other.path_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasStorageType()) {
          setStorageType(other.getStorageType());
        }
        if (other.hasUsedSpace()) {
          setUsedSpace(other.getUsedSpace());
        }
        if (other.hasFreeSpace()) {
          setFreeSpace(other.getFreeSpace());
        }
        if (other.hasReservedSpace()) {
          setReservedSpace(other.getReservedSpace());
        }
        if (other.hasReservedSpaceForReplicas()) {
          setReservedSpaceForReplicas(other.getReservedSpaceForReplicas());
        }
        if (other.hasNumBlocks()) {
          setNumBlocks(other.getNumBlocks());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasPath()) {
          return false;
        }
        if (!hasStorageType()) {
          return false;
        }
        if (!hasUsedSpace()) {
          return false;
        }
        if (!hasFreeSpace()) {
          return false;
        }
        if (!hasReservedSpace()) {
          return false;
        }
        if (!hasReservedSpaceForReplicas()) {
          return false;
        }
        if (!hasNumBlocks()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
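            // Tag = (field_number << 3) | wire_type: 10 -> field 1 (string), 16 -> field 2
            // (enum varint), 24/32/40/48/56 -> fields 3-7 (uint64 varints); 0 means end of input.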
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                path_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw);
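                // Unrecognized enum numbers are preserved as unknown varint fields
                // rather than failing the parse, per proto2 enum semantics.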
                if (tmpValue == null) {
                  mergeUnknownVarintField(2, tmpRaw);
                } else {
                  storageType_ = tmpRaw;
                  bitField0_ |= 0x00000002;
                }
                break;
              } // case 16
              case 24: {
                usedSpace_ = input.readUInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 32: {
                freeSpace_ = input.readUInt64();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
              case 40: {
                reservedSpace_ = input.readUInt64();
                bitField0_ |= 0x00000010;
                break;
              } // case 40
              case 48: {
                reservedSpaceForReplicas_ = input.readUInt64();
                bitField0_ |= 0x00000020;
                break;
              } // case 48
              case 56: {
                numBlocks_ = input.readUInt64();
                bitField0_ |= 0x00000040;
                break;
              } // case 56
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object path_ = "";
      /**
       * <code>required string path = 1;</code>
       * @return Whether the path field is set.
       */
      public boolean hasPath() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required string path = 1;</code>
       * @return The path.
       */
      public java.lang.String getPath() {
        java.lang.Object ref = path_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            path_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string path = 1;</code>
       * @return The bytes for path.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getPathBytes() {
        java.lang.Object ref = path_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          path_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string path = 1;</code>
       * @param value The path to set.
       * @return This builder for chaining.
       */
      public Builder setPath(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        path_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required string path = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearPath() {
        path_ = getDefaultInstance().getPath();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>required string path = 1;</code>
       * @param value The bytes for path to set.
       * @return This builder for chaining.
       */
      public Builder setPathBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        path_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private int storageType_ = 1;
      /**
       * <code>required .hadoop.hdfs.StorageTypeProto storageType = 2;</code>
       * @return Whether the storageType field is set.
       */
      @java.lang.Override public boolean hasStorageType() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.StorageTypeProto storageType = 2;</code>
       * @return The storageType.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(storageType_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
      }
      /**
       * <code>required .hadoop.hdfs.StorageTypeProto storageType = 2;</code>
       * @param value The storageType to set.
       * @return This builder for chaining.
       */
      public Builder setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000002;
        storageType_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.StorageTypeProto storageType = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearStorageType() {
        bitField0_ = (bitField0_ & ~0x00000002);
        storageType_ = 1;
        onChanged();
        return this;
      }

      private long usedSpace_ ;
      /**
       * <code>required uint64 usedSpace = 3;</code>
       * @return Whether the usedSpace field is set.
       */
      @java.lang.Override
      public boolean hasUsedSpace() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required uint64 usedSpace = 3;</code>
       * @return The usedSpace.
       */
      @java.lang.Override
      public long getUsedSpace() {
        return usedSpace_;
      }
      /**
       * <code>required uint64 usedSpace = 3;</code>
       * @param value The usedSpace to set.
       * @return This builder for chaining.
       */
      public Builder setUsedSpace(long value) {

        usedSpace_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 usedSpace = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearUsedSpace() {
        bitField0_ = (bitField0_ & ~0x00000004);
        usedSpace_ = 0L;
        onChanged();
        return this;
      }

      private long freeSpace_ ;
      /**
       * <code>required uint64 freeSpace = 4;</code>
       * @return Whether the freeSpace field is set.
       */
      @java.lang.Override
      public boolean hasFreeSpace() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>required uint64 freeSpace = 4;</code>
       * @return The freeSpace.
       */
      @java.lang.Override
      public long getFreeSpace() {
        return freeSpace_;
      }
      /**
       * <code>required uint64 freeSpace = 4;</code>
       * @param value The freeSpace to set.
       * @return This builder for chaining.
       */
      public Builder setFreeSpace(long value) {

        freeSpace_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 freeSpace = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearFreeSpace() {
        bitField0_ = (bitField0_ & ~0x00000008);
        freeSpace_ = 0L;
        onChanged();
        return this;
      }

      private long reservedSpace_ ;
      /**
       * <code>required uint64 reservedSpace = 5;</code>
       * @return Whether the reservedSpace field is set.
       */
      @java.lang.Override
      public boolean hasReservedSpace() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>required uint64 reservedSpace = 5;</code>
       * @return The reservedSpace.
       */
      @java.lang.Override
      public long getReservedSpace() {
        return reservedSpace_;
      }
      /**
       * <code>required uint64 reservedSpace = 5;</code>
       * @param value The reservedSpace to set.
       * @return This builder for chaining.
       */
      public Builder setReservedSpace(long value) {

        reservedSpace_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 reservedSpace = 5;</code>
       * @return This builder for chaining.
       */
      public Builder clearReservedSpace() {
        bitField0_ = (bitField0_ & ~0x00000010);
        reservedSpace_ = 0L;
        onChanged();
        return this;
      }

      private long reservedSpaceForReplicas_ ;
      /**
       * <code>required uint64 reservedSpaceForReplicas = 6;</code>
       * @return Whether the reservedSpaceForReplicas field is set.
       */
      @java.lang.Override
      public boolean hasReservedSpaceForReplicas() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>required uint64 reservedSpaceForReplicas = 6;</code>
       * @return The reservedSpaceForReplicas.
       */
      @java.lang.Override
      public long getReservedSpaceForReplicas() {
        return reservedSpaceForReplicas_;
      }
      /**
       * <code>required uint64 reservedSpaceForReplicas = 6;</code>
       * @param value The reservedSpaceForReplicas to set.
       * @return This builder for chaining.
       */
      public Builder setReservedSpaceForReplicas(long value) {

        reservedSpaceForReplicas_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 reservedSpaceForReplicas = 6;</code>
       * @return This builder for chaining.
       */
      public Builder clearReservedSpaceForReplicas() {
        bitField0_ = (bitField0_ & ~0x00000020);
        reservedSpaceForReplicas_ = 0L;
        onChanged();
        return this;
      }

      private long numBlocks_ ;
      /**
       * <code>required uint64 numBlocks = 7;</code>
       * @return Whether the numBlocks field is set.
       */
      @java.lang.Override
      public boolean hasNumBlocks() {
        return ((bitField0_ & 0x00000040) != 0);
      }
      /**
       * <code>required uint64 numBlocks = 7;</code>
       * @return The numBlocks.
       */
      @java.lang.Override
      public long getNumBlocks() {
        return numBlocks_;
      }
      /**
       * <code>required uint64 numBlocks = 7;</code>
       * @param value The numBlocks to set.
       * @return This builder for chaining.
       */
      public Builder setNumBlocks(long value) {

        numBlocks_ = value;
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 numBlocks = 7;</code>
       * @return This builder for chaining.
       */
      public Builder clearNumBlocks() {
        bitField0_ = (bitField0_ & ~0x00000040);
        numBlocks_ = 0L;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeVolumeInfoProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeVolumeInfoProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeVolumeInfoProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DatanodeVolumeInfoProto>() {
      @java.lang.Override
      public DatanodeVolumeInfoProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeVolumeInfoProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeVolumeInfoProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeVolumeInfoProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
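
  // Illustrative helper (not generated; the name exampleDatanodeVolumeInfo is
  // hypothetical): a minimal sketch of populating the seven required fields of
  // DatanodeVolumeInfoProto. build() throws if any required field is unset, so
  // callers typically set all of them; the numbers below are placeholders only.
  private static DatanodeVolumeInfoProto exampleDatanodeVolumeInfo() {
    return DatanodeVolumeInfoProto.newBuilder()
        .setPath("/data/1/hdfs/dn")                // field 1: required string
        .setStorageType(StorageTypeProto.DISK)     // field 2: required enum
        .setUsedSpace(1024L)                       // field 3: required uint64
        .setFreeSpace(2048L)                       // field 4: required uint64
        .setReservedSpace(0L)                      // field 5: required uint64
        .setReservedSpaceForReplicas(0L)           // field 6: required uint64
        .setNumBlocks(42L)                         // field 7: required uint64
        .build();
  }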

  public interface DatanodeInfosProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DatanodeInfosProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> 
        getDatanodesList();
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodes(int index);
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
     */
    int getDatanodesCount();
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> 
        getDatanodesOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodesOrBuilder(
        int index);
  }
  /**
   * <pre>
   **
   * DatanodeInfo array
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.DatanodeInfosProto}
   */
  public static final class DatanodeInfosProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.DatanodeInfosProto)
      DatanodeInfosProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use DatanodeInfosProto.newBuilder() to construct.
    private DatanodeInfosProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private DatanodeInfosProto() {
      datanodes_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new DatanodeInfosProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder.class);
    }

    public static final int DATANODES_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> datanodes_;
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getDatanodesList() {
      return datanodes_;
    }
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> 
        getDatanodesOrBuilderList() {
      return datanodes_;
    }
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
     */
    @java.lang.Override
    public int getDatanodesCount() {
      return datanodes_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodes(int index) {
      return datanodes_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodesOrBuilder(
        int index) {
      return datanodes_.get(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      for (int i = 0; i < getDatanodesCount(); i++) {
        if (!getDatanodes(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      for (int i = 0; i < datanodes_.size(); i++) {
        output.writeMessage(1, datanodes_.get(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      for (int i = 0; i < datanodes_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, datanodes_.get(i));
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto) obj;

      if (!getDatanodesList()
          .equals(other.getDatanodesList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getDatanodesCount() > 0) {
        hash = (37 * hash) + DATANODES_FIELD_NUMBER;
        hash = (53 * hash) + getDatanodesList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * DatanodeInfo array
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.DatanodeInfosProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DatanodeInfosProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        if (datanodesBuilder_ == null) {
          datanodes_ = java.util.Collections.emptyList();
        } else {
          datanodes_ = null;
          datanodesBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto result) {
        if (datanodesBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0)) {
            datanodes_ = java.util.Collections.unmodifiableList(datanodes_);
            bitField0_ = (bitField0_ & ~0x00000001);
          }
          result.datanodes_ = datanodes_;
        } else {
          result.datanodes_ = datanodesBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto result) {
        int from_bitField0_ = bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance()) return this;
        if (datanodesBuilder_ == null) {
          if (!other.datanodes_.isEmpty()) {
            if (datanodes_.isEmpty()) {
              datanodes_ = other.datanodes_;
              bitField0_ = (bitField0_ & ~0x00000001);
            } else {
              ensureDatanodesIsMutable();
              datanodes_.addAll(other.datanodes_);
            }
            onChanged();
          }
        } else {
          if (!other.datanodes_.isEmpty()) {
            if (datanodesBuilder_.isEmpty()) {
              datanodesBuilder_.dispose();
              datanodesBuilder_ = null;
              datanodes_ = other.datanodes_;
              bitField0_ = (bitField0_ & ~0x00000001);
              datanodesBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getDatanodesFieldBuilder() : null;
            } else {
              datanodesBuilder_.addAllMessages(other.datanodes_);
            }
          }
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        for (int i = 0; i < getDatanodesCount(); i++) {
          if (!getDatanodes(i).isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER,
                        extensionRegistry);
                if (datanodesBuilder_ == null) {
                  ensureDatanodesIsMutable();
                  datanodes_.add(m);
                } else {
                  datanodesBuilder_.addMessage(m);
                }
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> datanodes_ =
        java.util.Collections.emptyList();
      private void ensureDatanodesIsMutable() {
        if (!((bitField0_ & 0x00000001) != 0)) {
          datanodes_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>(datanodes_);
          bitField0_ |= 0x00000001;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> datanodesBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getDatanodesList() {
        if (datanodesBuilder_ == null) {
          return java.util.Collections.unmodifiableList(datanodes_);
        } else {
          return datanodesBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
       */
      public int getDatanodesCount() {
        if (datanodesBuilder_ == null) {
          return datanodes_.size();
        } else {
          return datanodesBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDatanodes(int index) {
        if (datanodesBuilder_ == null) {
          return datanodes_.get(index);
        } else {
          return datanodesBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
       */
      public Builder setDatanodes(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
        if (datanodesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureDatanodesIsMutable();
          datanodes_.set(index, value);
          onChanged();
        } else {
          datanodesBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
       */
      public Builder setDatanodes(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
        if (datanodesBuilder_ == null) {
          ensureDatanodesIsMutable();
          datanodes_.set(index, builderForValue.build());
          onChanged();
        } else {
          datanodesBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
       */
      public Builder addDatanodes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
        if (datanodesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureDatanodesIsMutable();
          datanodes_.add(value);
          onChanged();
        } else {
          datanodesBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
       */
      public Builder addDatanodes(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
        if (datanodesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureDatanodesIsMutable();
          datanodes_.add(index, value);
          onChanged();
        } else {
          datanodesBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
       */
      public Builder addDatanodes(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
        if (datanodesBuilder_ == null) {
          ensureDatanodesIsMutable();
          datanodes_.add(builderForValue.build());
          onChanged();
        } else {
          datanodesBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
       */
      public Builder addDatanodes(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
        if (datanodesBuilder_ == null) {
          ensureDatanodesIsMutable();
          datanodes_.add(index, builderForValue.build());
          onChanged();
        } else {
          datanodesBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
       */
      public Builder addAllDatanodes(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> values) {
        if (datanodesBuilder_ == null) {
          ensureDatanodesIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, datanodes_);
          onChanged();
        } else {
          datanodesBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
       */
      public Builder clearDatanodes() {
        if (datanodesBuilder_ == null) {
          datanodes_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
        } else {
          datanodesBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
       */
      public Builder removeDatanodes(int index) {
        if (datanodesBuilder_ == null) {
          ensureDatanodesIsMutable();
          datanodes_.remove(index);
          onChanged();
        } else {
          datanodesBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getDatanodesBuilder(
          int index) {
        return getDatanodesFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getDatanodesOrBuilder(
          int index) {
        if (datanodesBuilder_ == null) {
          return datanodes_.get(index);
        } else {
          return datanodesBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> 
           getDatanodesOrBuilderList() {
        if (datanodesBuilder_ != null) {
          return datanodesBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(datanodes_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addDatanodesBuilder() {
        return getDatanodesFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addDatanodesBuilder(
          int index) {
        return getDatanodesFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto datanodes = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder> 
           getDatanodesBuilderList() {
        return getDatanodesFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> 
          getDatanodesFieldBuilder() {
        if (datanodesBuilder_ == null) {
          datanodesBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
                  datanodes_,
                  ((bitField0_ & 0x00000001) != 0),
                  getParentForChildren(),
                  isClean());
          datanodes_ = null;
        }
        return datanodesBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeInfosProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeInfosProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeInfosProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DatanodeInfosProto>() {
      @java.lang.Override
      public DatanodeInfosProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeInfosProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeInfosProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
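
  // Illustrative sketch (not generated by protoc): round-trips a DatanodeInfosProto
  // through the byte[] serialization path shown above. The datanodeInfo argument is
  // assumed to be fully built, i.e. its required id field is set; otherwise the
  // build() call here would throw an uninitialized-message exception.
  private static DatanodeInfosProto roundTripDatanodeInfos(DatanodeInfoProto datanodeInfo)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    DatanodeInfosProto infos = DatanodeInfosProto.newBuilder()
        .addDatanodes(datanodeInfo)
        .build();
    byte[] serialized = infos.toByteArray();
    return DatanodeInfosProto.parseFrom(serialized);
  }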

  public interface DatanodeInfoProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DatanodeInfoProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.DatanodeIDProto id = 1;</code>
     * @return Whether the id field is set.
     */
    boolean hasId();
    /**
     * <code>required .hadoop.hdfs.DatanodeIDProto id = 1;</code>
     * @return The id.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId();
    /**
     * <code>required .hadoop.hdfs.DatanodeIDProto id = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder();

    /**
     * <code>optional uint64 capacity = 2 [default = 0];</code>
     * @return Whether the capacity field is set.
     */
    boolean hasCapacity();
    /**
     * <code>optional uint64 capacity = 2 [default = 0];</code>
     * @return The capacity.
     */
    long getCapacity();

    /**
     * <code>optional uint64 dfsUsed = 3 [default = 0];</code>
     * @return Whether the dfsUsed field is set.
     */
    boolean hasDfsUsed();
    /**
     * <code>optional uint64 dfsUsed = 3 [default = 0];</code>
     * @return The dfsUsed.
     */
    long getDfsUsed();

    /**
     * <code>optional uint64 remaining = 4 [default = 0];</code>
     * @return Whether the remaining field is set.
     */
    boolean hasRemaining();
    /**
     * <code>optional uint64 remaining = 4 [default = 0];</code>
     * @return The remaining.
     */
    long getRemaining();

    /**
     * <code>optional uint64 blockPoolUsed = 5 [default = 0];</code>
     * @return Whether the blockPoolUsed field is set.
     */
    boolean hasBlockPoolUsed();
    /**
     * <code>optional uint64 blockPoolUsed = 5 [default = 0];</code>
     * @return The blockPoolUsed.
     */
    long getBlockPoolUsed();

    /**
     * <code>optional uint64 lastUpdate = 6 [default = 0];</code>
     * @return Whether the lastUpdate field is set.
     */
    boolean hasLastUpdate();
    /**
     * <code>optional uint64 lastUpdate = 6 [default = 0];</code>
     * @return The lastUpdate.
     */
    long getLastUpdate();

    /**
     * <code>optional uint32 xceiverCount = 7 [default = 0];</code>
     * @return Whether the xceiverCount field is set.
     */
    boolean hasXceiverCount();
    /**
     * <code>optional uint32 xceiverCount = 7 [default = 0];</code>
     * @return The xceiverCount.
     */
    int getXceiverCount();

    /**
     * <code>optional string location = 8;</code>
     * @return Whether the location field is set.
     */
    boolean hasLocation();
    /**
     * <code>optional string location = 8;</code>
     * @return The location.
     */
    java.lang.String getLocation();
    /**
     * <code>optional string location = 8;</code>
     * @return The bytes for location.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getLocationBytes();

    /**
     * <code>optional uint64 nonDfsUsed = 9;</code>
     * @return Whether the nonDfsUsed field is set.
     */
    boolean hasNonDfsUsed();
    /**
     * <code>optional uint64 nonDfsUsed = 9;</code>
     * @return The nonDfsUsed.
     */
    long getNonDfsUsed();

    /**
     * <code>optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];</code>
     * @return Whether the adminState field is set.
     */
    boolean hasAdminState();
    /**
     * <code>optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];</code>
     * @return The adminState.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState();

    /**
     * <code>optional uint64 cacheCapacity = 11 [default = 0];</code>
     * @return Whether the cacheCapacity field is set.
     */
    boolean hasCacheCapacity();
    /**
     * <code>optional uint64 cacheCapacity = 11 [default = 0];</code>
     * @return The cacheCapacity.
     */
    long getCacheCapacity();

    /**
     * <code>optional uint64 cacheUsed = 12 [default = 0];</code>
     * @return Whether the cacheUsed field is set.
     */
    boolean hasCacheUsed();
    /**
     * <code>optional uint64 cacheUsed = 12 [default = 0];</code>
     * @return The cacheUsed.
     */
    long getCacheUsed();

    /**
     * <code>optional uint64 lastUpdateMonotonic = 13 [default = 0];</code>
     * @return Whether the lastUpdateMonotonic field is set.
     */
    boolean hasLastUpdateMonotonic();
    /**
     * <code>optional uint64 lastUpdateMonotonic = 13 [default = 0];</code>
     * @return The lastUpdateMonotonic.
     */
    long getLastUpdateMonotonic();

    /**
     * <code>optional string upgradeDomain = 14;</code>
     * @return Whether the upgradeDomain field is set.
     */
    boolean hasUpgradeDomain();
    /**
     * <code>optional string upgradeDomain = 14;</code>
     * @return The upgradeDomain.
     */
    java.lang.String getUpgradeDomain();
    /**
     * <code>optional string upgradeDomain = 14;</code>
     * @return The bytes for upgradeDomain.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getUpgradeDomainBytes();

    /**
     * <code>optional uint64 lastBlockReportTime = 15 [default = 0];</code>
     * @return Whether the lastBlockReportTime field is set.
     */
    boolean hasLastBlockReportTime();
    /**
     * <code>optional uint64 lastBlockReportTime = 15 [default = 0];</code>
     * @return The lastBlockReportTime.
     */
    long getLastBlockReportTime();

    /**
     * <code>optional uint64 lastBlockReportMonotonic = 16 [default = 0];</code>
     * @return Whether the lastBlockReportMonotonic field is set.
     */
    boolean hasLastBlockReportMonotonic();
    /**
     * <code>optional uint64 lastBlockReportMonotonic = 16 [default = 0];</code>
     * @return The lastBlockReportMonotonic.
     */
    long getLastBlockReportMonotonic();

    /**
     * <code>optional uint32 numBlocks = 17 [default = 0];</code>
     * @return Whether the numBlocks field is set.
     */
    boolean hasNumBlocks();
    /**
     * <code>optional uint32 numBlocks = 17 [default = 0];</code>
     * @return The numBlocks.
     */
    int getNumBlocks();

    /**
     * <code>optional string softwareVersion = 18;</code>
     * @return Whether the softwareVersion field is set.
     */
    boolean hasSoftwareVersion();
    /**
     * <code>optional string softwareVersion = 18;</code>
     * @return The softwareVersion.
     */
    java.lang.String getSoftwareVersion();
    /**
     * <code>optional string softwareVersion = 18;</code>
     * @return The bytes for softwareVersion.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getSoftwareVersionBytes();
  }
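
  // Illustrative sketch (not generated by protoc): builds a minimal DatanodeInfoProto.
  // The datanodeId argument stands in for a previously built DatanodeIDProto; id is the
  // only required field, the numeric fields default to 0, and adminState defaults to
  // NORMAL when left unset.
  private static DatanodeInfoProto describeDatanode(DatanodeIDProto datanodeId) {
    return DatanodeInfoProto.newBuilder()
        .setId(datanodeId)                                   // required field 1
        .setCapacity(1024L * 1024L * 1024L)                  // optional uint64, field 2
        .setLocation("/default-rack")                        // optional string, field 8
        .setAdminState(DatanodeInfoProto.AdminState.NORMAL)  // optional enum, field 10
        .build();
  }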
  /**
   * <pre>
   **
   * The status of a Datanode
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.DatanodeInfoProto}
   */
  public static final class DatanodeInfoProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.DatanodeInfoProto)
      DatanodeInfoProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use DatanodeInfoProto.newBuilder() to construct.
    private DatanodeInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private DatanodeInfoProto() {
      location_ = "";
      adminState_ = 0;
      upgradeDomain_ = "";
      softwareVersion_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new DatanodeInfoProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder.class);
    }

    /**
     * Protobuf enum {@code hadoop.hdfs.DatanodeInfoProto.AdminState}
     */
    public enum AdminState
        implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
      /**
       * <code>NORMAL = 0;</code>
       */
      NORMAL(0),
      /**
       * <code>DECOMMISSION_INPROGRESS = 1;</code>
       */
      DECOMMISSION_INPROGRESS(1),
      /**
       * <code>DECOMMISSIONED = 2;</code>
       */
      DECOMMISSIONED(2),
      /**
       * <code>ENTERING_MAINTENANCE = 3;</code>
       */
      ENTERING_MAINTENANCE(3),
      /**
       * <code>IN_MAINTENANCE = 4;</code>
       */
      IN_MAINTENANCE(4),
      ;

      /**
       * <code>NORMAL = 0;</code>
       */
      public static final int NORMAL_VALUE = 0;
      /**
       * <code>DECOMMISSION_INPROGRESS = 1;</code>
       */
      public static final int DECOMMISSION_INPROGRESS_VALUE = 1;
      /**
       * <code>DECOMMISSIONED = 2;</code>
       */
      public static final int DECOMMISSIONED_VALUE = 2;
      /**
       * <code>ENTERING_MAINTENANCE = 3;</code>
       */
      public static final int ENTERING_MAINTENANCE_VALUE = 3;
      /**
       * <code>IN_MAINTENANCE = 4;</code>
       */
      public static final int IN_MAINTENANCE_VALUE = 4;


      public final int getNumber() {
        return value;
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       * @deprecated Use {@link #forNumber(int)} instead.
       */
      @java.lang.Deprecated
      public static AdminState valueOf(int value) {
        return forNumber(value);
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       */
      public static AdminState forNumber(int value) {
        switch (value) {
          case 0: return NORMAL;
          case 1: return DECOMMISSION_INPROGRESS;
          case 2: return DECOMMISSIONED;
          case 3: return ENTERING_MAINTENANCE;
          case 4: return IN_MAINTENANCE;
          default: return null;
        }
      }

      public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<AdminState>
          internalGetValueMap() {
        return internalValueMap;
      }
      private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
          AdminState> internalValueMap =
            new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<AdminState>() {
              public AdminState findValueByNumber(int number) {
                return AdminState.forNumber(number);
              }
            };

      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
          getValueDescriptor() {
        return getDescriptor().getValues().get(ordinal());
      }
      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptorForType() {
        return getDescriptor();
      }
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDescriptor().getEnumTypes().get(0);
      }

      private static final AdminState[] VALUES = values();

      public static AdminState valueOf(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
        if (desc.getType() != getDescriptor()) {
          throw new java.lang.IllegalArgumentException(
            "EnumValueDescriptor is not for this type.");
        }
        return VALUES[desc.getIndex()];
      }

      private final int value;

      private AdminState(int value) {
        this.value = value;
      }

      // @@protoc_insertion_point(enum_scope:hadoop.hdfs.DatanodeInfoProto.AdminState)
    }
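
    // Illustrative sketch (not generated by protoc): AdminState converts to and from its
    // numeric wire value; forNumber(int) returns null for values this build does not
    // recognize, so this helper mirrors getAdminState() below and falls back to NORMAL.
    private static AdminState adminStateForWireValue(int wireValue) {
      AdminState state = AdminState.forNumber(wireValue);
      return state == null ? AdminState.NORMAL : state;
    }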

    private int bitField0_;
    public static final int ID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto id_;
    /**
     * <code>required .hadoop.hdfs.DatanodeIDProto id = 1;</code>
     * @return Whether the id field is set.
     */
    @java.lang.Override
    public boolean hasId() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.DatanodeIDProto id = 1;</code>
     * @return The id.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId() {
      return id_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance() : id_;
    }
    /**
     * <code>required .hadoop.hdfs.DatanodeIDProto id = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder() {
      return id_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance() : id_;
    }

    public static final int CAPACITY_FIELD_NUMBER = 2;
    private long capacity_ = 0L;
    /**
     * <code>optional uint64 capacity = 2 [default = 0];</code>
     * @return Whether the capacity field is set.
     */
    @java.lang.Override
    public boolean hasCapacity() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional uint64 capacity = 2 [default = 0];</code>
     * @return The capacity.
     */
    @java.lang.Override
    public long getCapacity() {
      return capacity_;
    }

    public static final int DFSUSED_FIELD_NUMBER = 3;
    private long dfsUsed_ = 0L;
    /**
     * <code>optional uint64 dfsUsed = 3 [default = 0];</code>
     * @return Whether the dfsUsed field is set.
     */
    @java.lang.Override
    public boolean hasDfsUsed() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional uint64 dfsUsed = 3 [default = 0];</code>
     * @return The dfsUsed.
     */
    @java.lang.Override
    public long getDfsUsed() {
      return dfsUsed_;
    }

    public static final int REMAINING_FIELD_NUMBER = 4;
    private long remaining_ = 0L;
    /**
     * <code>optional uint64 remaining = 4 [default = 0];</code>
     * @return Whether the remaining field is set.
     */
    @java.lang.Override
    public boolean hasRemaining() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>optional uint64 remaining = 4 [default = 0];</code>
     * @return The remaining.
     */
    @java.lang.Override
    public long getRemaining() {
      return remaining_;
    }

    public static final int BLOCKPOOLUSED_FIELD_NUMBER = 5;
    private long blockPoolUsed_ = 0L;
    /**
     * <code>optional uint64 blockPoolUsed = 5 [default = 0];</code>
     * @return Whether the blockPoolUsed field is set.
     */
    @java.lang.Override
    public boolean hasBlockPoolUsed() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>optional uint64 blockPoolUsed = 5 [default = 0];</code>
     * @return The blockPoolUsed.
     */
    @java.lang.Override
    public long getBlockPoolUsed() {
      return blockPoolUsed_;
    }

    public static final int LASTUPDATE_FIELD_NUMBER = 6;
    private long lastUpdate_ = 0L;
    /**
     * <code>optional uint64 lastUpdate = 6 [default = 0];</code>
     * @return Whether the lastUpdate field is set.
     */
    @java.lang.Override
    public boolean hasLastUpdate() {
      return ((bitField0_ & 0x00000020) != 0);
    }
    /**
     * <code>optional uint64 lastUpdate = 6 [default = 0];</code>
     * @return The lastUpdate.
     */
    @java.lang.Override
    public long getLastUpdate() {
      return lastUpdate_;
    }

    public static final int XCEIVERCOUNT_FIELD_NUMBER = 7;
    private int xceiverCount_ = 0;
    /**
     * <code>optional uint32 xceiverCount = 7 [default = 0];</code>
     * @return Whether the xceiverCount field is set.
     */
    @java.lang.Override
    public boolean hasXceiverCount() {
      return ((bitField0_ & 0x00000040) != 0);
    }
    /**
     * <code>optional uint32 xceiverCount = 7 [default = 0];</code>
     * @return The xceiverCount.
     */
    @java.lang.Override
    public int getXceiverCount() {
      return xceiverCount_;
    }

    public static final int LOCATION_FIELD_NUMBER = 8;
    @SuppressWarnings("serial")
    private volatile java.lang.Object location_ = "";
    /**
     * <code>optional string location = 8;</code>
     * @return Whether the location field is set.
     */
    @java.lang.Override
    public boolean hasLocation() {
      return ((bitField0_ & 0x00000080) != 0);
    }
    /**
     * <code>optional string location = 8;</code>
     * @return The location.
     */
    @java.lang.Override
    public java.lang.String getLocation() {
      java.lang.Object ref = location_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          location_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string location = 8;</code>
     * @return The bytes for location.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getLocationBytes() {
      java.lang.Object ref = location_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        location_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int NONDFSUSED_FIELD_NUMBER = 9;
    private long nonDfsUsed_ = 0L;
    /**
     * <code>optional uint64 nonDfsUsed = 9;</code>
     * @return Whether the nonDfsUsed field is set.
     */
    @java.lang.Override
    public boolean hasNonDfsUsed() {
      return ((bitField0_ & 0x00000100) != 0);
    }
    /**
     * <code>optional uint64 nonDfsUsed = 9;</code>
     * @return The nonDfsUsed.
     */
    @java.lang.Override
    public long getNonDfsUsed() {
      return nonDfsUsed_;
    }

    public static final int ADMINSTATE_FIELD_NUMBER = 10;
    private int adminState_ = 0;
    /**
     * <code>optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];</code>
     * @return Whether the adminState field is set.
     */
    @java.lang.Override public boolean hasAdminState() {
      return ((bitField0_ & 0x00000200) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];</code>
     * @return The adminState.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState() {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.forNumber(adminState_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL : result;
    }

    public static final int CACHECAPACITY_FIELD_NUMBER = 11;
    private long cacheCapacity_ = 0L;
    /**
     * <code>optional uint64 cacheCapacity = 11 [default = 0];</code>
     * @return Whether the cacheCapacity field is set.
     */
    @java.lang.Override
    public boolean hasCacheCapacity() {
      return ((bitField0_ & 0x00000400) != 0);
    }
    /**
     * <code>optional uint64 cacheCapacity = 11 [default = 0];</code>
     * @return The cacheCapacity.
     */
    @java.lang.Override
    public long getCacheCapacity() {
      return cacheCapacity_;
    }

    public static final int CACHEUSED_FIELD_NUMBER = 12;
    private long cacheUsed_ = 0L;
    /**
     * <code>optional uint64 cacheUsed = 12 [default = 0];</code>
     * @return Whether the cacheUsed field is set.
     */
    @java.lang.Override
    public boolean hasCacheUsed() {
      return ((bitField0_ & 0x00000800) != 0);
    }
    /**
     * <code>optional uint64 cacheUsed = 12 [default = 0];</code>
     * @return The cacheUsed.
     */
    @java.lang.Override
    public long getCacheUsed() {
      return cacheUsed_;
    }

    public static final int LASTUPDATEMONOTONIC_FIELD_NUMBER = 13;
    private long lastUpdateMonotonic_ = 0L;
    /**
     * <code>optional uint64 lastUpdateMonotonic = 13 [default = 0];</code>
     * @return Whether the lastUpdateMonotonic field is set.
     */
    @java.lang.Override
    public boolean hasLastUpdateMonotonic() {
      return ((bitField0_ & 0x00001000) != 0);
    }
    /**
     * <code>optional uint64 lastUpdateMonotonic = 13 [default = 0];</code>
     * @return The lastUpdateMonotonic.
     */
    @java.lang.Override
    public long getLastUpdateMonotonic() {
      return lastUpdateMonotonic_;
    }

    public static final int UPGRADEDOMAIN_FIELD_NUMBER = 14;
    @SuppressWarnings("serial")
    private volatile java.lang.Object upgradeDomain_ = "";
    /**
     * <code>optional string upgradeDomain = 14;</code>
     * @return Whether the upgradeDomain field is set.
     */
    @java.lang.Override
    public boolean hasUpgradeDomain() {
      return ((bitField0_ & 0x00002000) != 0);
    }
    /**
     * <code>optional string upgradeDomain = 14;</code>
     * @return The upgradeDomain.
     */
    @java.lang.Override
    public java.lang.String getUpgradeDomain() {
      java.lang.Object ref = upgradeDomain_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          upgradeDomain_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string upgradeDomain = 14;</code>
     * @return The bytes for upgradeDomain.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getUpgradeDomainBytes() {
      java.lang.Object ref = upgradeDomain_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        upgradeDomain_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int LASTBLOCKREPORTTIME_FIELD_NUMBER = 15;
    private long lastBlockReportTime_ = 0L;
    /**
     * <code>optional uint64 lastBlockReportTime = 15 [default = 0];</code>
     * @return Whether the lastBlockReportTime field is set.
     */
    @java.lang.Override
    public boolean hasLastBlockReportTime() {
      return ((bitField0_ & 0x00004000) != 0);
    }
    /**
     * <code>optional uint64 lastBlockReportTime = 15 [default = 0];</code>
     * @return The lastBlockReportTime.
     */
    @java.lang.Override
    public long getLastBlockReportTime() {
      return lastBlockReportTime_;
    }

    public static final int LASTBLOCKREPORTMONOTONIC_FIELD_NUMBER = 16;
    private long lastBlockReportMonotonic_ = 0L;
    /**
     * <code>optional uint64 lastBlockReportMonotonic = 16 [default = 0];</code>
     * @return Whether the lastBlockReportMonotonic field is set.
     */
    @java.lang.Override
    public boolean hasLastBlockReportMonotonic() {
      return ((bitField0_ & 0x00008000) != 0);
    }
    /**
     * <code>optional uint64 lastBlockReportMonotonic = 16 [default = 0];</code>
     * @return The lastBlockReportMonotonic.
     */
    @java.lang.Override
    public long getLastBlockReportMonotonic() {
      return lastBlockReportMonotonic_;
    }

    public static final int NUMBLOCKS_FIELD_NUMBER = 17;
    private int numBlocks_ = 0;
    /**
     * <code>optional uint32 numBlocks = 17 [default = 0];</code>
     * @return Whether the numBlocks field is set.
     */
    @java.lang.Override
    public boolean hasNumBlocks() {
      return ((bitField0_ & 0x00010000) != 0);
    }
    /**
     * <code>optional uint32 numBlocks = 17 [default = 0];</code>
     * @return The numBlocks.
     */
    @java.lang.Override
    public int getNumBlocks() {
      return numBlocks_;
    }

    public static final int SOFTWAREVERSION_FIELD_NUMBER = 18;
    @SuppressWarnings("serial")
    private volatile java.lang.Object softwareVersion_ = "";
    /**
     * <code>optional string softwareVersion = 18;</code>
     * @return Whether the softwareVersion field is set.
     */
    @java.lang.Override
    public boolean hasSoftwareVersion() {
      return ((bitField0_ & 0x00020000) != 0);
    }
    /**
     * <code>optional string softwareVersion = 18;</code>
     * @return The softwareVersion.
     */
    @java.lang.Override
    public java.lang.String getSoftwareVersion() {
      java.lang.Object ref = softwareVersion_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          softwareVersion_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string softwareVersion = 18;</code>
     * @return The bytes for softwareVersion.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getSoftwareVersionBytes() {
      java.lang.Object ref = softwareVersion_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        softwareVersion_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getId().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

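    // writeTo emits each field only when its presence bit in bitField0_ is set,
    // in ascending field-number order (1..18), and then writes any unknown fields
    // preserved from parsing.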
    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getId());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(2, capacity_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeUInt64(3, dfsUsed_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeUInt64(4, remaining_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeUInt64(5, blockPoolUsed_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        output.writeUInt64(6, lastUpdate_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        output.writeUInt32(7, xceiverCount_);
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 8, location_);
      }
      if (((bitField0_ & 0x00000100) != 0)) {
        output.writeUInt64(9, nonDfsUsed_);
      }
      if (((bitField0_ & 0x00000200) != 0)) {
        output.writeEnum(10, adminState_);
      }
      if (((bitField0_ & 0x00000400) != 0)) {
        output.writeUInt64(11, cacheCapacity_);
      }
      if (((bitField0_ & 0x00000800) != 0)) {
        output.writeUInt64(12, cacheUsed_);
      }
      if (((bitField0_ & 0x00001000) != 0)) {
        output.writeUInt64(13, lastUpdateMonotonic_);
      }
      if (((bitField0_ & 0x00002000) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 14, upgradeDomain_);
      }
      if (((bitField0_ & 0x00004000) != 0)) {
        output.writeUInt64(15, lastBlockReportTime_);
      }
      if (((bitField0_ & 0x00008000) != 0)) {
        output.writeUInt64(16, lastBlockReportMonotonic_);
      }
      if (((bitField0_ & 0x00010000) != 0)) {
        output.writeUInt32(17, numBlocks_);
      }
      if (((bitField0_ & 0x00020000) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 18, softwareVersion_);
      }
      getUnknownFields().writeTo(output);
    }

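    // getSerializedSize mirrors writeTo: it sums the encoded size of every present
    // field and caches the total in memoizedSize so repeated calls are O(1).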
    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getId());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(2, capacity_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(3, dfsUsed_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(4, remaining_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(5, blockPoolUsed_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(6, lastUpdate_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(7, xceiverCount_);
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(8, location_);
      }
      if (((bitField0_ & 0x00000100) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(9, nonDfsUsed_);
      }
      if (((bitField0_ & 0x00000200) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(10, adminState_);
      }
      if (((bitField0_ & 0x00000400) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(11, cacheCapacity_);
      }
      if (((bitField0_ & 0x00000800) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(12, cacheUsed_);
      }
      if (((bitField0_ & 0x00001000) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(13, lastUpdateMonotonic_);
      }
      if (((bitField0_ & 0x00002000) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(14, upgradeDomain_);
      }
      if (((bitField0_ & 0x00004000) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(15, lastBlockReportTime_);
      }
      if (((bitField0_ & 0x00008000) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(16, lastBlockReportMonotonic_);
      }
      if (((bitField0_ & 0x00010000) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(17, numBlocks_);
      }
      if (((bitField0_ & 0x00020000) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(18, softwareVersion_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

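    // equals requires matching presence bits and equal values for every field, plus
    // identical unknown fields; messages differing only in which optional fields are
    // explicitly set are not equal.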
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto) obj;

      if (hasId() != other.hasId()) return false;
      if (hasId()) {
        if (!getId()
            .equals(other.getId())) return false;
      }
      if (hasCapacity() != other.hasCapacity()) return false;
      if (hasCapacity()) {
        if (getCapacity()
            != other.getCapacity()) return false;
      }
      if (hasDfsUsed() != other.hasDfsUsed()) return false;
      if (hasDfsUsed()) {
        if (getDfsUsed()
            != other.getDfsUsed()) return false;
      }
      if (hasRemaining() != other.hasRemaining()) return false;
      if (hasRemaining()) {
        if (getRemaining()
            != other.getRemaining()) return false;
      }
      if (hasBlockPoolUsed() != other.hasBlockPoolUsed()) return false;
      if (hasBlockPoolUsed()) {
        if (getBlockPoolUsed()
            != other.getBlockPoolUsed()) return false;
      }
      if (hasLastUpdate() != other.hasLastUpdate()) return false;
      if (hasLastUpdate()) {
        if (getLastUpdate()
            != other.getLastUpdate()) return false;
      }
      if (hasXceiverCount() != other.hasXceiverCount()) return false;
      if (hasXceiverCount()) {
        if (getXceiverCount()
            != other.getXceiverCount()) return false;
      }
      if (hasLocation() != other.hasLocation()) return false;
      if (hasLocation()) {
        if (!getLocation()
            .equals(other.getLocation())) return false;
      }
      if (hasNonDfsUsed() != other.hasNonDfsUsed()) return false;
      if (hasNonDfsUsed()) {
        if (getNonDfsUsed()
            != other.getNonDfsUsed()) return false;
      }
      if (hasAdminState() != other.hasAdminState()) return false;
      if (hasAdminState()) {
        if (adminState_ != other.adminState_) return false;
      }
      if (hasCacheCapacity() != other.hasCacheCapacity()) return false;
      if (hasCacheCapacity()) {
        if (getCacheCapacity()
            != other.getCacheCapacity()) return false;
      }
      if (hasCacheUsed() != other.hasCacheUsed()) return false;
      if (hasCacheUsed()) {
        if (getCacheUsed()
            != other.getCacheUsed()) return false;
      }
      if (hasLastUpdateMonotonic() != other.hasLastUpdateMonotonic()) return false;
      if (hasLastUpdateMonotonic()) {
        if (getLastUpdateMonotonic()
            != other.getLastUpdateMonotonic()) return false;
      }
      if (hasUpgradeDomain() != other.hasUpgradeDomain()) return false;
      if (hasUpgradeDomain()) {
        if (!getUpgradeDomain()
            .equals(other.getUpgradeDomain())) return false;
      }
      if (hasLastBlockReportTime() != other.hasLastBlockReportTime()) return false;
      if (hasLastBlockReportTime()) {
        if (getLastBlockReportTime()
            != other.getLastBlockReportTime()) return false;
      }
      if (hasLastBlockReportMonotonic() != other.hasLastBlockReportMonotonic()) return false;
      if (hasLastBlockReportMonotonic()) {
        if (getLastBlockReportMonotonic()
            != other.getLastBlockReportMonotonic()) return false;
      }
      if (hasNumBlocks() != other.hasNumBlocks()) return false;
      if (hasNumBlocks()) {
        if (getNumBlocks()
            != other.getNumBlocks()) return false;
      }
      if (hasSoftwareVersion() != other.hasSoftwareVersion()) return false;
      if (hasSoftwareVersion()) {
        if (!getSoftwareVersion()
            .equals(other.getSoftwareVersion())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

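    // hashCode folds in each present field, tagged by its field number, and caches
    // the result in memoizedHashCode (0 means "not yet computed").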
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasId()) {
        hash = (37 * hash) + ID_FIELD_NUMBER;
        hash = (53 * hash) + getId().hashCode();
      }
      if (hasCapacity()) {
        hash = (37 * hash) + CAPACITY_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getCapacity());
      }
      if (hasDfsUsed()) {
        hash = (37 * hash) + DFSUSED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getDfsUsed());
      }
      if (hasRemaining()) {
        hash = (37 * hash) + REMAINING_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getRemaining());
      }
      if (hasBlockPoolUsed()) {
        hash = (37 * hash) + BLOCKPOOLUSED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getBlockPoolUsed());
      }
      if (hasLastUpdate()) {
        hash = (37 * hash) + LASTUPDATE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getLastUpdate());
      }
      if (hasXceiverCount()) {
        hash = (37 * hash) + XCEIVERCOUNT_FIELD_NUMBER;
        hash = (53 * hash) + getXceiverCount();
      }
      if (hasLocation()) {
        hash = (37 * hash) + LOCATION_FIELD_NUMBER;
        hash = (53 * hash) + getLocation().hashCode();
      }
      if (hasNonDfsUsed()) {
        hash = (37 * hash) + NONDFSUSED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getNonDfsUsed());
      }
      if (hasAdminState()) {
        hash = (37 * hash) + ADMINSTATE_FIELD_NUMBER;
        hash = (53 * hash) + adminState_;
      }
      if (hasCacheCapacity()) {
        hash = (37 * hash) + CACHECAPACITY_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getCacheCapacity());
      }
      if (hasCacheUsed()) {
        hash = (37 * hash) + CACHEUSED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getCacheUsed());
      }
      if (hasLastUpdateMonotonic()) {
        hash = (37 * hash) + LASTUPDATEMONOTONIC_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getLastUpdateMonotonic());
      }
      if (hasUpgradeDomain()) {
        hash = (37 * hash) + UPGRADEDOMAIN_FIELD_NUMBER;
        hash = (53 * hash) + getUpgradeDomain().hashCode();
      }
      if (hasLastBlockReportTime()) {
        hash = (37 * hash) + LASTBLOCKREPORTTIME_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getLastBlockReportTime());
      }
      if (hasLastBlockReportMonotonic()) {
        hash = (37 * hash) + LASTBLOCKREPORTMONOTONIC_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getLastBlockReportMonotonic());
      }
      if (hasNumBlocks()) {
        hash = (37 * hash) + NUMBLOCKS_FIELD_NUMBER;
        hash = (53 * hash) + getNumBlocks();
      }
      if (hasSoftwareVersion()) {
        hash = (37 * hash) + SOFTWAREVERSION_FIELD_NUMBER;
        hash = (53 * hash) + getSoftwareVersion().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

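    // Illustrative round trip (sketch only; assumes a populated DatanodeIDProto
    // named "datanodeId" is available):
    //   DatanodeInfoProto info = DatanodeInfoProto.newBuilder()
    //       .setId(datanodeId)
    //       .setCapacity(64L * 1024 * 1024 * 1024)
    //       .setAdminState(DatanodeInfoProto.AdminState.NORMAL)
    //       .build();
    //   byte[] bytes = info.toByteArray();
    //   DatanodeInfoProto copy = DatanodeInfoProto.parseFrom(bytes);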
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * The status of a Datanode
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.DatanodeInfoProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DatanodeInfoProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getIdFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        id_ = null;
        if (idBuilder_ != null) {
          idBuilder_.dispose();
          idBuilder_ = null;
        }
        capacity_ = 0L;
        dfsUsed_ = 0L;
        remaining_ = 0L;
        blockPoolUsed_ = 0L;
        lastUpdate_ = 0L;
        xceiverCount_ = 0;
        location_ = "";
        nonDfsUsed_ = 0L;
        adminState_ = 0;
        cacheCapacity_ = 0L;
        cacheUsed_ = 0L;
        lastUpdateMonotonic_ = 0L;
        upgradeDomain_ = "";
        lastBlockReportTime_ = 0L;
        lastBlockReportMonotonic_ = 0L;
        numBlocks_ = 0;
        softwareVersion_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

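      // buildPartial0 copies only the fields whose builder presence bits are set into
      // the result message and ORs the corresponding bits into result.bitField0_.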
      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.id_ = idBuilder_ == null
              ? id_
              : idBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.capacity_ = capacity_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.dfsUsed_ = dfsUsed_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.remaining_ = remaining_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.blockPoolUsed_ = blockPoolUsed_;
          to_bitField0_ |= 0x00000010;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.lastUpdate_ = lastUpdate_;
          to_bitField0_ |= 0x00000020;
        }
        if (((from_bitField0_ & 0x00000040) != 0)) {
          result.xceiverCount_ = xceiverCount_;
          to_bitField0_ |= 0x00000040;
        }
        if (((from_bitField0_ & 0x00000080) != 0)) {
          result.location_ = location_;
          to_bitField0_ |= 0x00000080;
        }
        if (((from_bitField0_ & 0x00000100) != 0)) {
          result.nonDfsUsed_ = nonDfsUsed_;
          to_bitField0_ |= 0x00000100;
        }
        if (((from_bitField0_ & 0x00000200) != 0)) {
          result.adminState_ = adminState_;
          to_bitField0_ |= 0x00000200;
        }
        if (((from_bitField0_ & 0x00000400) != 0)) {
          result.cacheCapacity_ = cacheCapacity_;
          to_bitField0_ |= 0x00000400;
        }
        if (((from_bitField0_ & 0x00000800) != 0)) {
          result.cacheUsed_ = cacheUsed_;
          to_bitField0_ |= 0x00000800;
        }
        if (((from_bitField0_ & 0x00001000) != 0)) {
          result.lastUpdateMonotonic_ = lastUpdateMonotonic_;
          to_bitField0_ |= 0x00001000;
        }
        if (((from_bitField0_ & 0x00002000) != 0)) {
          result.upgradeDomain_ = upgradeDomain_;
          to_bitField0_ |= 0x00002000;
        }
        if (((from_bitField0_ & 0x00004000) != 0)) {
          result.lastBlockReportTime_ = lastBlockReportTime_;
          to_bitField0_ |= 0x00004000;
        }
        if (((from_bitField0_ & 0x00008000) != 0)) {
          result.lastBlockReportMonotonic_ = lastBlockReportMonotonic_;
          to_bitField0_ |= 0x00008000;
        }
        if (((from_bitField0_ & 0x00010000) != 0)) {
          result.numBlocks_ = numBlocks_;
          to_bitField0_ |= 0x00010000;
        }
        if (((from_bitField0_ & 0x00020000) != 0)) {
          result.softwareVersion_ = softwareVersion_;
          to_bitField0_ |= 0x00020000;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()) return this;
        if (other.hasId()) {
          mergeId(other.getId());
        }
        if (other.hasCapacity()) {
          setCapacity(other.getCapacity());
        }
        if (other.hasDfsUsed()) {
          setDfsUsed(other.getDfsUsed());
        }
        if (other.hasRemaining()) {
          setRemaining(other.getRemaining());
        }
        if (other.hasBlockPoolUsed()) {
          setBlockPoolUsed(other.getBlockPoolUsed());
        }
        if (other.hasLastUpdate()) {
          setLastUpdate(other.getLastUpdate());
        }
        if (other.hasXceiverCount()) {
          setXceiverCount(other.getXceiverCount());
        }
        if (other.hasLocation()) {
          location_ = other.location_;
          bitField0_ |= 0x00000080;
          onChanged();
        }
        if (other.hasNonDfsUsed()) {
          setNonDfsUsed(other.getNonDfsUsed());
        }
        if (other.hasAdminState()) {
          setAdminState(other.getAdminState());
        }
        if (other.hasCacheCapacity()) {
          setCacheCapacity(other.getCacheCapacity());
        }
        if (other.hasCacheUsed()) {
          setCacheUsed(other.getCacheUsed());
        }
        if (other.hasLastUpdateMonotonic()) {
          setLastUpdateMonotonic(other.getLastUpdateMonotonic());
        }
        if (other.hasUpgradeDomain()) {
          upgradeDomain_ = other.upgradeDomain_;
          bitField0_ |= 0x00002000;
          onChanged();
        }
        if (other.hasLastBlockReportTime()) {
          setLastBlockReportTime(other.getLastBlockReportTime());
        }
        if (other.hasLastBlockReportMonotonic()) {
          setLastBlockReportMonotonic(other.getLastBlockReportMonotonic());
        }
        if (other.hasNumBlocks()) {
          setNumBlocks(other.getNumBlocks());
        }
        if (other.hasSoftwareVersion()) {
          softwareVersion_ = other.softwareVersion_;
          bitField0_ |= 0x00020000;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasId()) {
          return false;
        }
        if (!getId().isInitialized()) {
          return false;
        }
        return true;
      }

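      // The switch below dispatches on the raw wire tag, (fieldNumber << 3) | wireType:
      // e.g. case 10 is field 1 (id) as a length-delimited message, case 16 is field 2
      // (capacity) as a varint, and case 80 is field 10 (adminState) as an enum varint.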
      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getIdFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                capacity_ = input.readUInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                dfsUsed_ = input.readUInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 32: {
                remaining_ = input.readUInt64();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
              case 40: {
                blockPoolUsed_ = input.readUInt64();
                bitField0_ |= 0x00000010;
                break;
              } // case 40
              case 48: {
                lastUpdate_ = input.readUInt64();
                bitField0_ |= 0x00000020;
                break;
              } // case 48
              case 56: {
                xceiverCount_ = input.readUInt32();
                bitField0_ |= 0x00000040;
                break;
              } // case 56
              case 66: {
                location_ = input.readBytes();
                bitField0_ |= 0x00000080;
                break;
              } // case 66
              case 72: {
                nonDfsUsed_ = input.readUInt64();
                bitField0_ |= 0x00000100;
                break;
              } // case 72
              case 80: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(10, tmpRaw);
                } else {
                  adminState_ = tmpRaw;
                  bitField0_ |= 0x00000200;
                }
                break;
              } // case 80
              case 88: {
                cacheCapacity_ = input.readUInt64();
                bitField0_ |= 0x00000400;
                break;
              } // case 88
              case 96: {
                cacheUsed_ = input.readUInt64();
                bitField0_ |= 0x00000800;
                break;
              } // case 96
              case 104: {
                lastUpdateMonotonic_ = input.readUInt64();
                bitField0_ |= 0x00001000;
                break;
              } // case 104
              case 114: {
                upgradeDomain_ = input.readBytes();
                bitField0_ |= 0x00002000;
                break;
              } // case 114
              case 120: {
                lastBlockReportTime_ = input.readUInt64();
                bitField0_ |= 0x00004000;
                break;
              } // case 120
              case 128: {
                lastBlockReportMonotonic_ = input.readUInt64();
                bitField0_ |= 0x00008000;
                break;
              } // case 128
              case 136: {
                numBlocks_ = input.readUInt32();
                bitField0_ |= 0x00010000;
                break;
              } // case 136
              case 146: {
                softwareVersion_ = input.readBytes();
                bitField0_ |= 0x00020000;
                break;
              } // case 146
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
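      // bitField0_ tracks which of the 18 fields have been explicitly set on this
      // builder; bit n-1 corresponds to field number n.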
      private int bitField0_;

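      // The required DatanodeIDProto is held either directly in id_ or, once sub-builder
      // access is requested, in idBuilder_ (a lazily created SingleFieldBuilderV3).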
      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto id_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> idBuilder_;
      /**
       * <code>required .hadoop.hdfs.DatanodeIDProto id = 1;</code>
       * @return Whether the id field is set.
       */
      public boolean hasId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeIDProto id = 1;</code>
       * @return The id.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto getId() {
        if (idBuilder_ == null) {
          return id_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance() : id_;
        } else {
          return idBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeIDProto id = 1;</code>
       */
      public Builder setId(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
        if (idBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          id_ = value;
        } else {
          idBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeIDProto id = 1;</code>
       */
      public Builder setId(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder builderForValue) {
        if (idBuilder_ == null) {
          id_ = builderForValue.build();
        } else {
          idBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeIDProto id = 1;</code>
       */
      public Builder mergeId(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto value) {
        if (idBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            id_ != null &&
            id_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance()) {
            getIdBuilder().mergeFrom(value);
          } else {
            id_ = value;
          }
        } else {
          idBuilder_.mergeFrom(value);
        }
        if (id_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeIDProto id = 1;</code>
       */
      public Builder clearId() {
        bitField0_ = (bitField0_ & ~0x00000001);
        id_ = null;
        if (idBuilder_ != null) {
          idBuilder_.dispose();
          idBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeIDProto id = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder getIdBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getIdFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeIDProto id = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder getIdOrBuilder() {
        if (idBuilder_ != null) {
          return idBuilder_.getMessageOrBuilder();
        } else {
          return id_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.getDefaultInstance() : id_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeIDProto id = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder> 
          getIdFieldBuilder() {
        if (idBuilder_ == null) {
          idBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProtoOrBuilder>(
                  getId(),
                  getParentForChildren(),
                  isClean());
          id_ = null;
        }
        return idBuilder_;
      }

      private long capacity_ ;
      /**
       * <code>optional uint64 capacity = 2 [default = 0];</code>
       * @return Whether the capacity field is set.
       */
      @java.lang.Override
      public boolean hasCapacity() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional uint64 capacity = 2 [default = 0];</code>
       * @return The capacity.
       */
      @java.lang.Override
      public long getCapacity() {
        return capacity_;
      }
      /**
       * <code>optional uint64 capacity = 2 [default = 0];</code>
       * @param value The capacity to set.
       * @return This builder for chaining.
       */
      public Builder setCapacity(long value) {

        capacity_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 capacity = 2 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearCapacity() {
        bitField0_ = (bitField0_ & ~0x00000002);
        capacity_ = 0L;
        onChanged();
        return this;
      }

      private long dfsUsed_ ;
      /**
       * <code>optional uint64 dfsUsed = 3 [default = 0];</code>
       * @return Whether the dfsUsed field is set.
       */
      @java.lang.Override
      public boolean hasDfsUsed() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional uint64 dfsUsed = 3 [default = 0];</code>
       * @return The dfsUsed.
       */
      @java.lang.Override
      public long getDfsUsed() {
        return dfsUsed_;
      }
      /**
       * <code>optional uint64 dfsUsed = 3 [default = 0];</code>
       * @param value The dfsUsed to set.
       * @return This builder for chaining.
       */
      public Builder setDfsUsed(long value) {

        dfsUsed_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 dfsUsed = 3 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearDfsUsed() {
        bitField0_ = (bitField0_ & ~0x00000004);
        dfsUsed_ = 0L;
        onChanged();
        return this;
      }

      private long remaining_ ;
      /**
       * <code>optional uint64 remaining = 4 [default = 0];</code>
       * @return Whether the remaining field is set.
       */
      @java.lang.Override
      public boolean hasRemaining() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional uint64 remaining = 4 [default = 0];</code>
       * @return The remaining.
       */
      @java.lang.Override
      public long getRemaining() {
        return remaining_;
      }
      /**
       * <code>optional uint64 remaining = 4 [default = 0];</code>
       * @param value The remaining to set.
       * @return This builder for chaining.
       */
      public Builder setRemaining(long value) {

        remaining_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 remaining = 4 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearRemaining() {
        bitField0_ = (bitField0_ & ~0x00000008);
        remaining_ = 0L;
        onChanged();
        return this;
      }

      private long blockPoolUsed_ ;
      /**
       * <code>optional uint64 blockPoolUsed = 5 [default = 0];</code>
       * @return Whether the blockPoolUsed field is set.
       */
      @java.lang.Override
      public boolean hasBlockPoolUsed() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional uint64 blockPoolUsed = 5 [default = 0];</code>
       * @return The blockPoolUsed.
       */
      @java.lang.Override
      public long getBlockPoolUsed() {
        return blockPoolUsed_;
      }
      /**
       * <code>optional uint64 blockPoolUsed = 5 [default = 0];</code>
       * @param value The blockPoolUsed to set.
       * @return This builder for chaining.
       */
      public Builder setBlockPoolUsed(long value) {

        blockPoolUsed_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 blockPoolUsed = 5 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearBlockPoolUsed() {
        bitField0_ = (bitField0_ & ~0x00000010);
        blockPoolUsed_ = 0L;
        onChanged();
        return this;
      }

      private long lastUpdate_ ;
      /**
       * <code>optional uint64 lastUpdate = 6 [default = 0];</code>
       * @return Whether the lastUpdate field is set.
       */
      @java.lang.Override
      public boolean hasLastUpdate() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>optional uint64 lastUpdate = 6 [default = 0];</code>
       * @return The lastUpdate.
       */
      @java.lang.Override
      public long getLastUpdate() {
        return lastUpdate_;
      }
      /**
       * <code>optional uint64 lastUpdate = 6 [default = 0];</code>
       * @param value The lastUpdate to set.
       * @return This builder for chaining.
       */
      public Builder setLastUpdate(long value) {

        lastUpdate_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 lastUpdate = 6 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearLastUpdate() {
        bitField0_ = (bitField0_ & ~0x00000020);
        lastUpdate_ = 0L;
        onChanged();
        return this;
      }

      private int xceiverCount_ ;
      /**
       * <code>optional uint32 xceiverCount = 7 [default = 0];</code>
       * @return Whether the xceiverCount field is set.
       */
      @java.lang.Override
      public boolean hasXceiverCount() {
        return ((bitField0_ & 0x00000040) != 0);
      }
      /**
       * <code>optional uint32 xceiverCount = 7 [default = 0];</code>
       * @return The xceiverCount.
       */
      @java.lang.Override
      public int getXceiverCount() {
        return xceiverCount_;
      }
      /**
       * <code>optional uint32 xceiverCount = 7 [default = 0];</code>
       * @param value The xceiverCount to set.
       * @return This builder for chaining.
       */
      public Builder setXceiverCount(int value) {

        xceiverCount_ = value;
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint32 xceiverCount = 7 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearXceiverCount() {
        bitField0_ = (bitField0_ & ~0x00000040);
        xceiverCount_ = 0;
        onChanged();
        return this;
      }

      private java.lang.Object location_ = "";
      /**
       * <code>optional string location = 8;</code>
       * @return Whether the location field is set.
       */
      public boolean hasLocation() {
        return ((bitField0_ & 0x00000080) != 0);
      }
      /**
       * <code>optional string location = 8;</code>
       * @return The location.
       */
      public java.lang.String getLocation() {
        java.lang.Object ref = location_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            location_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string location = 8;</code>
       * @return The bytes for location.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getLocationBytes() {
        java.lang.Object ref = location_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          location_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string location = 8;</code>
       * @param value The location to set.
       * @return This builder for chaining.
       */
      public Builder setLocation(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        location_ = value;
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }
      /**
       * <code>optional string location = 8;</code>
       * @return This builder for chaining.
       */
      public Builder clearLocation() {
        location_ = getDefaultInstance().getLocation();
        bitField0_ = (bitField0_ & ~0x00000080);
        onChanged();
        return this;
      }
      /**
       * <code>optional string location = 8;</code>
       * @param value The bytes for location to set.
       * @return This builder for chaining.
       */
      public Builder setLocationBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        location_ = value;
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }

      private long nonDfsUsed_ ;
      /**
       * <code>optional uint64 nonDfsUsed = 9;</code>
       * @return Whether the nonDfsUsed field is set.
       */
      @java.lang.Override
      public boolean hasNonDfsUsed() {
        return ((bitField0_ & 0x00000100) != 0);
      }
      /**
       * <code>optional uint64 nonDfsUsed = 9;</code>
       * @return The nonDfsUsed.
       */
      @java.lang.Override
      public long getNonDfsUsed() {
        return nonDfsUsed_;
      }
      /**
       * <code>optional uint64 nonDfsUsed = 9;</code>
       * @param value The nonDfsUsed to set.
       * @return This builder for chaining.
       */
      public Builder setNonDfsUsed(long value) {

        nonDfsUsed_ = value;
        bitField0_ |= 0x00000100;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 nonDfsUsed = 9;</code>
       * @return This builder for chaining.
       */
      public Builder clearNonDfsUsed() {
        bitField0_ = (bitField0_ & ~0x00000100);
        nonDfsUsed_ = 0L;
        onChanged();
        return this;
      }

      private int adminState_ = 0;
      /**
       * <code>optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];</code>
       * @return Whether the adminState field is set.
       */
      @java.lang.Override public boolean hasAdminState() {
        return ((bitField0_ & 0x00000200) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];</code>
       * @return The adminState.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState getAdminState() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.forNumber(adminState_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState.NORMAL : result;
      }
      /**
       * <code>optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];</code>
       * @param value The adminState to set.
       * @return This builder for chaining.
       */
      public Builder setAdminState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000200;
        adminState_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.DatanodeInfoProto.AdminState adminState = 10 [default = NORMAL];</code>
       * @return This builder for chaining.
       */
      public Builder clearAdminState() {
        bitField0_ = (bitField0_ & ~0x00000200);
        adminState_ = 0;
        onChanged();
        return this;
      }

      private long cacheCapacity_ ;
      /**
       * <code>optional uint64 cacheCapacity = 11 [default = 0];</code>
       * @return Whether the cacheCapacity field is set.
       */
      @java.lang.Override
      public boolean hasCacheCapacity() {
        return ((bitField0_ & 0x00000400) != 0);
      }
      /**
       * <code>optional uint64 cacheCapacity = 11 [default = 0];</code>
       * @return The cacheCapacity.
       */
      @java.lang.Override
      public long getCacheCapacity() {
        return cacheCapacity_;
      }
      /**
       * <code>optional uint64 cacheCapacity = 11 [default = 0];</code>
       * @param value The cacheCapacity to set.
       * @return This builder for chaining.
       */
      public Builder setCacheCapacity(long value) {

        cacheCapacity_ = value;
        bitField0_ |= 0x00000400;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 cacheCapacity = 11 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearCacheCapacity() {
        bitField0_ = (bitField0_ & ~0x00000400);
        cacheCapacity_ = 0L;
        onChanged();
        return this;
      }

      private long cacheUsed_ ;
      /**
       * <code>optional uint64 cacheUsed = 12 [default = 0];</code>
       * @return Whether the cacheUsed field is set.
       */
      @java.lang.Override
      public boolean hasCacheUsed() {
        return ((bitField0_ & 0x00000800) != 0);
      }
      /**
       * <code>optional uint64 cacheUsed = 12 [default = 0];</code>
       * @return The cacheUsed.
       */
      @java.lang.Override
      public long getCacheUsed() {
        return cacheUsed_;
      }
      /**
       * <code>optional uint64 cacheUsed = 12 [default = 0];</code>
       * @param value The cacheUsed to set.
       * @return This builder for chaining.
       */
      public Builder setCacheUsed(long value) {

        cacheUsed_ = value;
        bitField0_ |= 0x00000800;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 cacheUsed = 12 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearCacheUsed() {
        bitField0_ = (bitField0_ & ~0x00000800);
        cacheUsed_ = 0L;
        onChanged();
        return this;
      }

      private long lastUpdateMonotonic_ ;
      /**
       * <code>optional uint64 lastUpdateMonotonic = 13 [default = 0];</code>
       * @return Whether the lastUpdateMonotonic field is set.
       */
      @java.lang.Override
      public boolean hasLastUpdateMonotonic() {
        return ((bitField0_ & 0x00001000) != 0);
      }
      /**
       * <code>optional uint64 lastUpdateMonotonic = 13 [default = 0];</code>
       * @return The lastUpdateMonotonic.
       */
      @java.lang.Override
      public long getLastUpdateMonotonic() {
        return lastUpdateMonotonic_;
      }
      /**
       * <code>optional uint64 lastUpdateMonotonic = 13 [default = 0];</code>
       * @param value The lastUpdateMonotonic to set.
       * @return This builder for chaining.
       */
      public Builder setLastUpdateMonotonic(long value) {

        lastUpdateMonotonic_ = value;
        bitField0_ |= 0x00001000;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 lastUpdateMonotonic = 13 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearLastUpdateMonotonic() {
        bitField0_ = (bitField0_ & ~0x00001000);
        lastUpdateMonotonic_ = 0L;
        onChanged();
        return this;
      }

      private java.lang.Object upgradeDomain_ = "";
      /**
       * <code>optional string upgradeDomain = 14;</code>
       * @return Whether the upgradeDomain field is set.
       */
      public boolean hasUpgradeDomain() {
        return ((bitField0_ & 0x00002000) != 0);
      }
      /**
       * <code>optional string upgradeDomain = 14;</code>
       * @return The upgradeDomain.
       */
      public java.lang.String getUpgradeDomain() {
        java.lang.Object ref = upgradeDomain_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            upgradeDomain_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string upgradeDomain = 14;</code>
       * @return The bytes for upgradeDomain.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getUpgradeDomainBytes() {
        java.lang.Object ref = upgradeDomain_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          upgradeDomain_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string upgradeDomain = 14;</code>
       * @param value The upgradeDomain to set.
       * @return This builder for chaining.
       */
      public Builder setUpgradeDomain(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        upgradeDomain_ = value;
        bitField0_ |= 0x00002000;
        onChanged();
        return this;
      }
      /**
       * <code>optional string upgradeDomain = 14;</code>
       * @return This builder for chaining.
       */
      public Builder clearUpgradeDomain() {
        upgradeDomain_ = getDefaultInstance().getUpgradeDomain();
        bitField0_ = (bitField0_ & ~0x00002000);
        onChanged();
        return this;
      }
      /**
       * <code>optional string upgradeDomain = 14;</code>
       * @param value The bytes for upgradeDomain to set.
       * @return This builder for chaining.
       */
      public Builder setUpgradeDomainBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        upgradeDomain_ = value;
        bitField0_ |= 0x00002000;
        onChanged();
        return this;
      }

      private long lastBlockReportTime_ ;
      /**
       * <code>optional uint64 lastBlockReportTime = 15 [default = 0];</code>
       * @return Whether the lastBlockReportTime field is set.
       */
      @java.lang.Override
      public boolean hasLastBlockReportTime() {
        return ((bitField0_ & 0x00004000) != 0);
      }
      /**
       * <code>optional uint64 lastBlockReportTime = 15 [default = 0];</code>
       * @return The lastBlockReportTime.
       */
      @java.lang.Override
      public long getLastBlockReportTime() {
        return lastBlockReportTime_;
      }
      /**
       * <code>optional uint64 lastBlockReportTime = 15 [default = 0];</code>
       * @param value The lastBlockReportTime to set.
       * @return This builder for chaining.
       */
      public Builder setLastBlockReportTime(long value) {

        lastBlockReportTime_ = value;
        bitField0_ |= 0x00004000;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 lastBlockReportTime = 15 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearLastBlockReportTime() {
        bitField0_ = (bitField0_ & ~0x00004000);
        lastBlockReportTime_ = 0L;
        onChanged();
        return this;
      }

      private long lastBlockReportMonotonic_ ;
      /**
       * <code>optional uint64 lastBlockReportMonotonic = 16 [default = 0];</code>
       * @return Whether the lastBlockReportMonotonic field is set.
       */
      @java.lang.Override
      public boolean hasLastBlockReportMonotonic() {
        return ((bitField0_ & 0x00008000) != 0);
      }
      /**
       * <code>optional uint64 lastBlockReportMonotonic = 16 [default = 0];</code>
       * @return The lastBlockReportMonotonic.
       */
      @java.lang.Override
      public long getLastBlockReportMonotonic() {
        return lastBlockReportMonotonic_;
      }
      /**
       * <code>optional uint64 lastBlockReportMonotonic = 16 [default = 0];</code>
       * @param value The lastBlockReportMonotonic to set.
       * @return This builder for chaining.
       */
      public Builder setLastBlockReportMonotonic(long value) {

        lastBlockReportMonotonic_ = value;
        bitField0_ |= 0x00008000;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 lastBlockReportMonotonic = 16 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearLastBlockReportMonotonic() {
        bitField0_ = (bitField0_ & ~0x00008000);
        lastBlockReportMonotonic_ = 0L;
        onChanged();
        return this;
      }

      private int numBlocks_ ;
      /**
       * <code>optional uint32 numBlocks = 17 [default = 0];</code>
       * @return Whether the numBlocks field is set.
       */
      @java.lang.Override
      public boolean hasNumBlocks() {
        return ((bitField0_ & 0x00010000) != 0);
      }
      /**
       * <code>optional uint32 numBlocks = 17 [default = 0];</code>
       * @return The numBlocks.
       */
      @java.lang.Override
      public int getNumBlocks() {
        return numBlocks_;
      }
      /**
       * <code>optional uint32 numBlocks = 17 [default = 0];</code>
       * @param value The numBlocks to set.
       * @return This builder for chaining.
       */
      public Builder setNumBlocks(int value) {

        numBlocks_ = value;
        bitField0_ |= 0x00010000;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint32 numBlocks = 17 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearNumBlocks() {
        bitField0_ = (bitField0_ & ~0x00010000);
        numBlocks_ = 0;
        onChanged();
        return this;
      }

      private java.lang.Object softwareVersion_ = "";
      /**
       * <code>optional string softwareVersion = 18;</code>
       * @return Whether the softwareVersion field is set.
       */
      public boolean hasSoftwareVersion() {
        return ((bitField0_ & 0x00020000) != 0);
      }
      /**
       * <code>optional string softwareVersion = 18;</code>
       * @return The softwareVersion.
       */
      public java.lang.String getSoftwareVersion() {
        java.lang.Object ref = softwareVersion_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            softwareVersion_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string softwareVersion = 18;</code>
       * @return The bytes for softwareVersion.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getSoftwareVersionBytes() {
        java.lang.Object ref = softwareVersion_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          softwareVersion_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string softwareVersion = 18;</code>
       * @param value The softwareVersion to set.
       * @return This builder for chaining.
       */
      public Builder setSoftwareVersion(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        softwareVersion_ = value;
        bitField0_ |= 0x00020000;
        onChanged();
        return this;
      }
      /**
       * <code>optional string softwareVersion = 18;</code>
       * @return This builder for chaining.
       */
      public Builder clearSoftwareVersion() {
        softwareVersion_ = getDefaultInstance().getSoftwareVersion();
        bitField0_ = (bitField0_ & ~0x00020000);
        onChanged();
        return this;
      }
      /**
       * <code>optional string softwareVersion = 18;</code>
       * @param value The bytes for softwareVersion to set.
       * @return This builder for chaining.
       */
      public Builder setSoftwareVersionBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        softwareVersion_ = value;
        bitField0_ |= 0x00020000;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeInfoProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeInfoProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeInfoProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DatanodeInfoProto>() {
      @java.lang.Override
      public DatanodeInfoProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeInfoProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeInfoProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
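
  // Editor's note (illustrative sketch, not generated code): the DatanodeInfoProto.Builder
  // setters above follow the usual protobuf-java pattern -- each setXxx(value) stores the value,
  // flips the field's bit in bitField0_, calls onChanged(), and returns the builder for chaining.
  // A minimal write-side sketch; the literals are placeholders, and buildPartial() is used here
  // only to sidestep the required fields declared earlier in this message:
  //
  //   HdfsProtos.DatanodeInfoProto info = HdfsProtos.DatanodeInfoProto.newBuilder()
  //       .setUpgradeDomain("ud-1")                 // placeholder upgrade-domain label
  //       .setLastBlockReportTime(1700000000000L)   // caller-supplied wall-clock millis
  //       .setNumBlocks(42)
  //       .buildPartial();                          // build() would also enforce required fields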

  public interface DatanodeStorageProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DatanodeStorageProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required string storageUuid = 1;</code>
     * @return Whether the storageUuid field is set.
     */
    boolean hasStorageUuid();
    /**
     * <code>required string storageUuid = 1;</code>
     * @return The storageUuid.
     */
    java.lang.String getStorageUuid();
    /**
     * <code>required string storageUuid = 1;</code>
     * @return The bytes for storageUuid.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getStorageUuidBytes();

    /**
     * <code>optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];</code>
     * @return Whether the state field is set.
     */
    boolean hasState();
    /**
     * <code>optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];</code>
     * @return The state.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState getState();

    /**
     * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];</code>
     * @return Whether the storageType field is set.
     */
    boolean hasStorageType();
    /**
     * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];</code>
     * @return The storageType.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType();
  }
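
  // Editor's note (illustrative, not generated code): for the optional enum fields above,
  // hasState()/hasStorageType() report whether the field was explicitly set; the getters alone
  // cannot tell, because an unset field simply returns its declared default (NORMAL / DISK).
  // A minimal read-side sketch, assuming `storage` is a DatanodeStorageProto and handleState(...)
  // is a hypothetical callback of the caller's own:
  //
  //   if (storage.hasState()) {
  //     handleState(storage.getState());  // field 2 was explicitly populated by the sender
  //   } else {
  //     // unset on the wire: getState() still returns the default, StorageState.NORMAL
  //   }
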
  /**
   * <pre>
   **
   * Represents a storage available on the datanode
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.DatanodeStorageProto}
   */
  public static final class DatanodeStorageProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.DatanodeStorageProto)
      DatanodeStorageProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use DatanodeStorageProto.newBuilder() to construct.
    private DatanodeStorageProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private DatanodeStorageProto() {
      storageUuid_ = "";
      state_ = 0;
      storageType_ = 1;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new DatanodeStorageProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder.class);
    }

    /**
     * Protobuf enum {@code hadoop.hdfs.DatanodeStorageProto.StorageState}
     */
    public enum StorageState
        implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
      /**
       * <code>NORMAL = 0;</code>
       */
      NORMAL(0),
      /**
       * <code>READ_ONLY_SHARED = 1;</code>
       */
      READ_ONLY_SHARED(1),
      ;

      /**
       * <code>NORMAL = 0;</code>
       */
      public static final int NORMAL_VALUE = 0;
      /**
       * <code>READ_ONLY_SHARED = 1;</code>
       */
      public static final int READ_ONLY_SHARED_VALUE = 1;


      public final int getNumber() {
        return value;
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       * @deprecated Use {@link #forNumber(int)} instead.
       */
      @java.lang.Deprecated
      public static StorageState valueOf(int value) {
        return forNumber(value);
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       */
      public static StorageState forNumber(int value) {
        switch (value) {
          case 0: return NORMAL;
          case 1: return READ_ONLY_SHARED;
          default: return null;
        }
      }

      public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<StorageState>
          internalGetValueMap() {
        return internalValueMap;
      }
      private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
          StorageState> internalValueMap =
            new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<StorageState>() {
              public StorageState findValueByNumber(int number) {
                return StorageState.forNumber(number);
              }
            };

      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
          getValueDescriptor() {
        return getDescriptor().getValues().get(ordinal());
      }
      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptorForType() {
        return getDescriptor();
      }
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDescriptor().getEnumTypes().get(0);
      }

      private static final StorageState[] VALUES = values();

      public static StorageState valueOf(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
        if (desc.getType() != getDescriptor()) {
          throw new java.lang.IllegalArgumentException(
            "EnumValueDescriptor is not for this type.");
        }
        return VALUES[desc.getIndex()];
      }

      private final int value;

      private StorageState(int value) {
        this.value = value;
      }

      // @@protoc_insertion_point(enum_scope:hadoop.hdfs.DatanodeStorageProto.StorageState)
    }
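
    // Editor's note (illustrative, not generated code): forNumber(int) above returns null for
    // wire values this enum does not recognize, so code reading raw ints should null-check it
    // rather than rely on the deprecated valueOf(int). A minimal sketch, assuming rawState was
    // read from an external source:
    //
    //   int rawState = readRawStateSomehow();            // hypothetical source of a raw value
    //   StorageState state = StorageState.forNumber(rawState);
    //   if (state == null) {
    //     state = StorageState.NORMAL;                   // fall back to the proto default
    //   }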

    private int bitField0_;
    public static final int STORAGEUUID_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object storageUuid_ = "";
    /**
     * <code>required string storageUuid = 1;</code>
     * @return Whether the storageUuid field is set.
     */
    @java.lang.Override
    public boolean hasStorageUuid() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required string storageUuid = 1;</code>
     * @return The storageUuid.
     */
    @java.lang.Override
    public java.lang.String getStorageUuid() {
      java.lang.Object ref = storageUuid_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          storageUuid_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string storageUuid = 1;</code>
     * @return The bytes for storageUuid.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getStorageUuidBytes() {
      java.lang.Object ref = storageUuid_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        storageUuid_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int STATE_FIELD_NUMBER = 2;
    private int state_ = 0;
    /**
     * <code>optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];</code>
     * @return Whether the state field is set.
     */
    @java.lang.Override public boolean hasState() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];</code>
     * @return The state.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState getState() {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.forNumber(state_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.NORMAL : result;
    }

    public static final int STORAGETYPE_FIELD_NUMBER = 3;
    private int storageType_ = 1;
    /**
     * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];</code>
     * @return Whether the storageType field is set.
     */
    @java.lang.Override public boolean hasStorageType() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];</code>
     * @return The storageType.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(storageType_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasStorageUuid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, storageUuid_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeEnum(2, state_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeEnum(3, storageType_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, storageUuid_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(2, state_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(3, storageType_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto) obj;

      if (hasStorageUuid() != other.hasStorageUuid()) return false;
      if (hasStorageUuid()) {
        if (!getStorageUuid()
            .equals(other.getStorageUuid())) return false;
      }
      if (hasState() != other.hasState()) return false;
      if (hasState()) {
        if (state_ != other.state_) return false;
      }
      if (hasStorageType() != other.hasStorageType()) return false;
      if (hasStorageType()) {
        if (storageType_ != other.storageType_) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasStorageUuid()) {
        hash = (37 * hash) + STORAGEUUID_FIELD_NUMBER;
        hash = (53 * hash) + getStorageUuid().hashCode();
      }
      if (hasState()) {
        hash = (37 * hash) + STATE_FIELD_NUMBER;
        hash = (53 * hash) + state_;
      }
      if (hasStorageType()) {
        hash = (37 * hash) + STORAGETYPE_FIELD_NUMBER;
        hash = (53 * hash) + storageType_;
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Represents a storage available on the datanode
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.DatanodeStorageProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DatanodeStorageProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        storageUuid_ = "";
        state_ = 0;
        storageType_ = 1;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.storageUuid_ = storageUuid_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.state_ = state_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.storageType_ = storageType_;
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance()) return this;
        if (other.hasStorageUuid()) {
          storageUuid_ = other.storageUuid_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasState()) {
          setState(other.getState());
        }
        if (other.hasStorageType()) {
          setStorageType(other.getStorageType());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasStorageUuid()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                storageUuid_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(2, tmpRaw);
                } else {
                  state_ = tmpRaw;
                  bitField0_ |= 0x00000002;
                }
                break;
              } // case 16
              case 24: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(3, tmpRaw);
                } else {
                  storageType_ = tmpRaw;
                  bitField0_ |= 0x00000004;
                }
                break;
              } // case 24
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object storageUuid_ = "";
      /**
       * <code>required string storageUuid = 1;</code>
       * @return Whether the storageUuid field is set.
       */
      public boolean hasStorageUuid() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required string storageUuid = 1;</code>
       * @return The storageUuid.
       */
      public java.lang.String getStorageUuid() {
        java.lang.Object ref = storageUuid_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            storageUuid_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string storageUuid = 1;</code>
       * @return The bytes for storageUuid.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getStorageUuidBytes() {
        java.lang.Object ref = storageUuid_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          storageUuid_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string storageUuid = 1;</code>
       * @param value The storageUuid to set.
       * @return This builder for chaining.
       */
      public Builder setStorageUuid(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        storageUuid_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required string storageUuid = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearStorageUuid() {
        storageUuid_ = getDefaultInstance().getStorageUuid();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>required string storageUuid = 1;</code>
       * @param value The bytes for storageUuid to set.
       * @return This builder for chaining.
       */
      public Builder setStorageUuidBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        storageUuid_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private int state_ = 0;
      /**
       * <code>optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];</code>
       * @return Whether the state field is set.
       */
      @java.lang.Override public boolean hasState() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];</code>
       * @return The state.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState getState() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.forNumber(state_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState.NORMAL : result;
      }
      /**
       * <code>optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];</code>
       * @param value The state to set.
       * @return This builder for chaining.
       */
      public Builder setState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000002;
        state_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.DatanodeStorageProto.StorageState state = 2 [default = NORMAL];</code>
       * @return This builder for chaining.
       */
      public Builder clearState() {
        bitField0_ = (bitField0_ & ~0x00000002);
        state_ = 0;
        onChanged();
        return this;
      }

      private int storageType_ = 1;
      /**
       * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];</code>
       * @return Whether the storageType field is set.
       */
      @java.lang.Override public boolean hasStorageType() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];</code>
       * @return The storageType.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(storageType_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];</code>
       * @param value The storageType to set.
       * @return This builder for chaining.
       */
      public Builder setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000004;
        storageType_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 3 [default = DISK];</code>
       * @return This builder for chaining.
       */
      public Builder clearStorageType() {
        bitField0_ = (bitField0_ & ~0x00000004);
        storageType_ = 1;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DatanodeStorageProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.DatanodeStorageProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeStorageProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DatanodeStorageProto>() {
      @java.lang.Override
      public DatanodeStorageProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeStorageProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<DatanodeStorageProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
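
  // Editor's note (illustrative sketch, not generated code): a typical round trip through
  // DatanodeStorageProto uses the builder and parseFrom overloads shown above. storageUuid is
  // the only required field (see isInitialized()), so build() succeeds once it is set and the
  // enum fields fall back to their declared defaults; the UUID literal is a placeholder and
  // toByteArray() comes from the protobuf runtime base class:
  //
  //   HdfsProtos.DatanodeStorageProto storage = HdfsProtos.DatanodeStorageProto.newBuilder()
  //       .setStorageUuid("DS-placeholder-uuid")
  //       .setState(HdfsProtos.DatanodeStorageProto.StorageState.NORMAL)
  //       .setStorageType(HdfsProtos.StorageTypeProto.SSD)
  //       .build();
  //   byte[] wire = storage.toByteArray();
  //   HdfsProtos.DatanodeStorageProto parsed =
  //       HdfsProtos.DatanodeStorageProto.parseFrom(wire);  // throws InvalidProtocolBufferException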

  public interface StorageReportProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.StorageReportProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required string storageUuid = 1 [deprecated = true];</code>
     * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated.
     *     See hdfs.proto;l=153
     * @return Whether the storageUuid field is set.
     */
    @java.lang.Deprecated boolean hasStorageUuid();
    /**
     * <code>required string storageUuid = 1 [deprecated = true];</code>
     * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated.
     *     See hdfs.proto;l=153
     * @return The storageUuid.
     */
    @java.lang.Deprecated java.lang.String getStorageUuid();
    /**
     * <code>required string storageUuid = 1 [deprecated = true];</code>
     * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated.
     *     See hdfs.proto;l=153
     * @return The bytes for storageUuid.
     */
    @java.lang.Deprecated org.apache.hadoop.thirdparty.protobuf.ByteString
        getStorageUuidBytes();

    /**
     * <code>optional bool failed = 2 [default = false];</code>
     * @return Whether the failed field is set.
     */
    boolean hasFailed();
    /**
     * <code>optional bool failed = 2 [default = false];</code>
     * @return The failed.
     */
    boolean getFailed();

    /**
     * <code>optional uint64 capacity = 3 [default = 0];</code>
     * @return Whether the capacity field is set.
     */
    boolean hasCapacity();
    /**
     * <code>optional uint64 capacity = 3 [default = 0];</code>
     * @return The capacity.
     */
    long getCapacity();

    /**
     * <code>optional uint64 dfsUsed = 4 [default = 0];</code>
     * @return Whether the dfsUsed field is set.
     */
    boolean hasDfsUsed();
    /**
     * <code>optional uint64 dfsUsed = 4 [default = 0];</code>
     * @return The dfsUsed.
     */
    long getDfsUsed();

    /**
     * <code>optional uint64 remaining = 5 [default = 0];</code>
     * @return Whether the remaining field is set.
     */
    boolean hasRemaining();
    /**
     * <code>optional uint64 remaining = 5 [default = 0];</code>
     * @return The remaining.
     */
    long getRemaining();

    /**
     * <code>optional uint64 blockPoolUsed = 6 [default = 0];</code>
     * @return Whether the blockPoolUsed field is set.
     */
    boolean hasBlockPoolUsed();
    /**
     * <code>optional uint64 blockPoolUsed = 6 [default = 0];</code>
     * @return The blockPoolUsed.
     */
    long getBlockPoolUsed();

    /**
     * <pre>
     * supersedes StorageUuid
     * </pre>
     *
     * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 7;</code>
     * @return Whether the storage field is set.
     */
    boolean hasStorage();
    /**
     * <pre>
     * supersedes StorageUuid
     * </pre>
     *
     * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 7;</code>
     * @return The storage.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage();
    /**
     * <pre>
     * supersedes StorageUuid
     * </pre>
     *
     * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 7;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder();

    /**
     * <code>optional uint64 nonDfsUsed = 8;</code>
     * @return Whether the nonDfsUsed field is set.
     */
    boolean hasNonDfsUsed();
    /**
     * <code>optional uint64 nonDfsUsed = 8;</code>
     * @return The nonDfsUsed.
     */
    long getNonDfsUsed();

    /**
     * <code>optional string mount = 9;</code>
     * @return Whether the mount field is set.
     */
    boolean hasMount();
    /**
     * <code>optional string mount = 9;</code>
     * @return The mount.
     */
    java.lang.String getMount();
    /**
     * <code>optional string mount = 9;</code>
     * @return The bytes for mount.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getMountBytes();
  }
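
  // Editor's note (illustrative, not generated code): StorageReportProto pairs the per-storage
  // utilization counters with the DatanodeStorageProto that supersedes the deprecated
  // storageUuid field. The builder setters named below are the standard generated counterparts
  // of the accessors in the interface above and are assumed to be defined later in this class,
  // so treat this as a sketch rather than a verbatim API listing:
  //
  //   HdfsProtos.StorageReportProto report = HdfsProtos.StorageReportProto.newBuilder()
  //       .setStorageUuid(storage.getStorageUuid())  // still required, though deprecated
  //       .setStorage(storage)                       // the DatanodeStorageProto built earlier
  //       .setCapacity(4L * 1024 * 1024 * 1024)      // bytes; placeholder figures
  //       .setDfsUsed(1L * 1024 * 1024 * 1024)
  //       .setRemaining(3L * 1024 * 1024 * 1024)
  //       .build();
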
  /**
   * Protobuf type {@code hadoop.hdfs.StorageReportProto}
   */
  public static final class StorageReportProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.StorageReportProto)
      StorageReportProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use StorageReportProto.newBuilder() to construct.
    private StorageReportProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private StorageReportProto() {
      storageUuid_ = "";
      mount_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new StorageReportProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder.class);
    }

    private int bitField0_;
    public static final int STORAGEUUID_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object storageUuid_ = "";
    /**
     * <code>required string storageUuid = 1 [deprecated = true];</code>
     * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated.
     *     See hdfs.proto;l=153
     * @return Whether the storageUuid field is set.
     */
    @java.lang.Override
    @java.lang.Deprecated public boolean hasStorageUuid() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required string storageUuid = 1 [deprecated = true];</code>
     * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated.
     *     See hdfs.proto;l=153
     * @return The storageUuid.
     */
    @java.lang.Override
    @java.lang.Deprecated public java.lang.String getStorageUuid() {
      java.lang.Object ref = storageUuid_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          storageUuid_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string storageUuid = 1 [deprecated = true];</code>
     * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated.
     *     See hdfs.proto;l=153
     * @return The bytes for storageUuid.
     */
    @java.lang.Override
    @java.lang.Deprecated public org.apache.hadoop.thirdparty.protobuf.ByteString
        getStorageUuidBytes() {
      java.lang.Object ref = storageUuid_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        storageUuid_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int FAILED_FIELD_NUMBER = 2;
    private boolean failed_ = false;
    /**
     * <code>optional bool failed = 2 [default = false];</code>
     * @return Whether the failed field is set.
     */
    @java.lang.Override
    public boolean hasFailed() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional bool failed = 2 [default = false];</code>
     * @return The failed.
     */
    @java.lang.Override
    public boolean getFailed() {
      return failed_;
    }

    public static final int CAPACITY_FIELD_NUMBER = 3;
    private long capacity_ = 0L;
    /**
     * <code>optional uint64 capacity = 3 [default = 0];</code>
     * @return Whether the capacity field is set.
     */
    @java.lang.Override
    public boolean hasCapacity() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional uint64 capacity = 3 [default = 0];</code>
     * @return The capacity.
     */
    @java.lang.Override
    public long getCapacity() {
      return capacity_;
    }

    public static final int DFSUSED_FIELD_NUMBER = 4;
    private long dfsUsed_ = 0L;
    /**
     * <code>optional uint64 dfsUsed = 4 [default = 0];</code>
     * @return Whether the dfsUsed field is set.
     */
    @java.lang.Override
    public boolean hasDfsUsed() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>optional uint64 dfsUsed = 4 [default = 0];</code>
     * @return The dfsUsed.
     */
    @java.lang.Override
    public long getDfsUsed() {
      return dfsUsed_;
    }

    public static final int REMAINING_FIELD_NUMBER = 5;
    private long remaining_ = 0L;
    /**
     * <code>optional uint64 remaining = 5 [default = 0];</code>
     * @return Whether the remaining field is set.
     */
    @java.lang.Override
    public boolean hasRemaining() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>optional uint64 remaining = 5 [default = 0];</code>
     * @return The remaining.
     */
    @java.lang.Override
    public long getRemaining() {
      return remaining_;
    }

    public static final int BLOCKPOOLUSED_FIELD_NUMBER = 6;
    private long blockPoolUsed_ = 0L;
    /**
     * <code>optional uint64 blockPoolUsed = 6 [default = 0];</code>
     * @return Whether the blockPoolUsed field is set.
     */
    @java.lang.Override
    public boolean hasBlockPoolUsed() {
      return ((bitField0_ & 0x00000020) != 0);
    }
    /**
     * <code>optional uint64 blockPoolUsed = 6 [default = 0];</code>
     * @return The blockPoolUsed.
     */
    @java.lang.Override
    public long getBlockPoolUsed() {
      return blockPoolUsed_;
    }

    public static final int STORAGE_FIELD_NUMBER = 7;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto storage_;
    /**
     * <pre>
     * supersedes StorageUuid
     * </pre>
     *
     * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 7;</code>
     * @return Whether the storage field is set.
     */
    @java.lang.Override
    public boolean hasStorage() {
      return ((bitField0_ & 0x00000040) != 0);
    }
    /**
     * <pre>
     * supersedes StorageUuid
     * </pre>
     *
     * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 7;</code>
     * @return The storage.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage() {
      return storage_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance() : storage_;
    }
    /**
     * <pre>
     * supersedes StorageUuid
     * </pre>
     *
     * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 7;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() {
      return storage_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance() : storage_;
    }
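
    /*
     * Note on the accessors above: getStorage() never returns null; when the
     * field is unset it falls back to DatanodeStorageProto.getDefaultInstance(),
     * so callers should consult hasStorage() first. A minimal sketch
     * ("report" is a hypothetical StorageReportProto instance):
     *
     * <pre>{@code
     * if (report.hasStorage()) {
     *   DatanodeStorageProto storage = report.getStorage();
     *   // ... inspect the storage descriptor ...
     * }
     * }</pre>
     */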

    public static final int NONDFSUSED_FIELD_NUMBER = 8;
    private long nonDfsUsed_ = 0L;
    /**
     * <code>optional uint64 nonDfsUsed = 8;</code>
     * @return Whether the nonDfsUsed field is set.
     */
    @java.lang.Override
    public boolean hasNonDfsUsed() {
      return ((bitField0_ & 0x00000080) != 0);
    }
    /**
     * <code>optional uint64 nonDfsUsed = 8;</code>
     * @return The nonDfsUsed.
     */
    @java.lang.Override
    public long getNonDfsUsed() {
      return nonDfsUsed_;
    }

    public static final int MOUNT_FIELD_NUMBER = 9;
    @SuppressWarnings("serial")
    private volatile java.lang.Object mount_ = "";
    /**
     * <code>optional string mount = 9;</code>
     * @return Whether the mount field is set.
     */
    @java.lang.Override
    public boolean hasMount() {
      return ((bitField0_ & 0x00000100) != 0);
    }
    /**
     * <code>optional string mount = 9;</code>
     * @return The mount.
     */
    @java.lang.Override
    public java.lang.String getMount() {
      java.lang.Object ref = mount_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          mount_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string mount = 9;</code>
     * @return The bytes for mount.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getMountBytes() {
      java.lang.Object ref = mount_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        mount_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasStorageUuid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (hasStorage()) {
        if (!getStorage().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }
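
    /*
     * storageUuid is the only required field of this message, so a partially
     * populated builder stays uninitialized until it is set. A minimal sketch
     * of the consequence (illustrative values; build() throws an
     * UninitializedMessageException for an uninitialized message, while
     * buildPartial() does not):
     *
     * <pre>{@code
     * StorageReportProto.Builder b = StorageReportProto.newBuilder()
     *     .setCapacity(1024L);        // storageUuid still unset
     * // b.build();                   // would throw: required field missing
     * StorageReportProto ok = b.setStorageUuid("uuid-0").build();
     * }</pre>
     */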

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, storageUuid_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeBool(2, failed_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeUInt64(3, capacity_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeUInt64(4, dfsUsed_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeUInt64(5, remaining_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        output.writeUInt64(6, blockPoolUsed_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        output.writeMessage(7, getStorage());
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        output.writeUInt64(8, nonDfsUsed_);
      }
      if (((bitField0_ & 0x00000100) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 9, mount_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, storageUuid_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(2, failed_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(3, capacity_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(4, dfsUsed_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(5, remaining_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(6, blockPoolUsed_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(7, getStorage());
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(8, nonDfsUsed_);
      }
      if (((bitField0_ & 0x00000100) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(9, mount_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto) obj;

      if (hasStorageUuid() != other.hasStorageUuid()) return false;
      if (hasStorageUuid()) {
        if (!getStorageUuid()
            .equals(other.getStorageUuid())) return false;
      }
      if (hasFailed() != other.hasFailed()) return false;
      if (hasFailed()) {
        if (getFailed()
            != other.getFailed()) return false;
      }
      if (hasCapacity() != other.hasCapacity()) return false;
      if (hasCapacity()) {
        if (getCapacity()
            != other.getCapacity()) return false;
      }
      if (hasDfsUsed() != other.hasDfsUsed()) return false;
      if (hasDfsUsed()) {
        if (getDfsUsed()
            != other.getDfsUsed()) return false;
      }
      if (hasRemaining() != other.hasRemaining()) return false;
      if (hasRemaining()) {
        if (getRemaining()
            != other.getRemaining()) return false;
      }
      if (hasBlockPoolUsed() != other.hasBlockPoolUsed()) return false;
      if (hasBlockPoolUsed()) {
        if (getBlockPoolUsed()
            != other.getBlockPoolUsed()) return false;
      }
      if (hasStorage() != other.hasStorage()) return false;
      if (hasStorage()) {
        if (!getStorage()
            .equals(other.getStorage())) return false;
      }
      if (hasNonDfsUsed() != other.hasNonDfsUsed()) return false;
      if (hasNonDfsUsed()) {
        if (getNonDfsUsed()
            != other.getNonDfsUsed()) return false;
      }
      if (hasMount() != other.hasMount()) return false;
      if (hasMount()) {
        if (!getMount()
            .equals(other.getMount())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasStorageUuid()) {
        hash = (37 * hash) + STORAGEUUID_FIELD_NUMBER;
        hash = (53 * hash) + getStorageUuid().hashCode();
      }
      if (hasFailed()) {
        hash = (37 * hash) + FAILED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getFailed());
      }
      if (hasCapacity()) {
        hash = (37 * hash) + CAPACITY_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getCapacity());
      }
      if (hasDfsUsed()) {
        hash = (37 * hash) + DFSUSED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getDfsUsed());
      }
      if (hasRemaining()) {
        hash = (37 * hash) + REMAINING_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getRemaining());
      }
      if (hasBlockPoolUsed()) {
        hash = (37 * hash) + BLOCKPOOLUSED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getBlockPoolUsed());
      }
      if (hasStorage()) {
        hash = (37 * hash) + STORAGE_FIELD_NUMBER;
        hash = (53 * hash) + getStorage().hashCode();
      }
      if (hasNonDfsUsed()) {
        hash = (37 * hash) + NONDFSUSED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getNonDfsUsed());
      }
      if (hasMount()) {
        hash = (37 * hash) + MOUNT_FIELD_NUMBER;
        hash = (53 * hash) + getMount().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.StorageReportProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.StorageReportProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getStorageFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        storageUuid_ = "";
        failed_ = false;
        capacity_ = 0L;
        dfsUsed_ = 0L;
        remaining_ = 0L;
        blockPoolUsed_ = 0L;
        storage_ = null;
        if (storageBuilder_ != null) {
          storageBuilder_.dispose();
          storageBuilder_ = null;
        }
        nonDfsUsed_ = 0L;
        mount_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageReportProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.storageUuid_ = storageUuid_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.failed_ = failed_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.capacity_ = capacity_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.dfsUsed_ = dfsUsed_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.remaining_ = remaining_;
          to_bitField0_ |= 0x00000010;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.blockPoolUsed_ = blockPoolUsed_;
          to_bitField0_ |= 0x00000020;
        }
        if (((from_bitField0_ & 0x00000040) != 0)) {
          result.storage_ = storageBuilder_ == null
              ? storage_
              : storageBuilder_.build();
          to_bitField0_ |= 0x00000040;
        }
        if (((from_bitField0_ & 0x00000080) != 0)) {
          result.nonDfsUsed_ = nonDfsUsed_;
          to_bitField0_ |= 0x00000080;
        }
        if (((from_bitField0_ & 0x00000100) != 0)) {
          result.mount_ = mount_;
          to_bitField0_ |= 0x00000100;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto.getDefaultInstance()) return this;
        if (other.hasStorageUuid()) {
          storageUuid_ = other.storageUuid_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasFailed()) {
          setFailed(other.getFailed());
        }
        if (other.hasCapacity()) {
          setCapacity(other.getCapacity());
        }
        if (other.hasDfsUsed()) {
          setDfsUsed(other.getDfsUsed());
        }
        if (other.hasRemaining()) {
          setRemaining(other.getRemaining());
        }
        if (other.hasBlockPoolUsed()) {
          setBlockPoolUsed(other.getBlockPoolUsed());
        }
        if (other.hasStorage()) {
          mergeStorage(other.getStorage());
        }
        if (other.hasNonDfsUsed()) {
          setNonDfsUsed(other.getNonDfsUsed());
        }
        if (other.hasMount()) {
          mount_ = other.mount_;
          bitField0_ |= 0x00000100;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasStorageUuid()) {
          return false;
        }
        if (hasStorage()) {
          if (!getStorage().isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                storageUuid_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                failed_ = input.readBool();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                capacity_ = input.readUInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 32: {
                dfsUsed_ = input.readUInt64();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
              case 40: {
                remaining_ = input.readUInt64();
                bitField0_ |= 0x00000010;
                break;
              } // case 40
              case 48: {
                blockPoolUsed_ = input.readUInt64();
                bitField0_ |= 0x00000020;
                break;
              } // case 48
              case 58: {
                input.readMessage(
                    getStorageFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000040;
                break;
              } // case 58
              case 64: {
                nonDfsUsed_ = input.readUInt64();
                bitField0_ |= 0x00000080;
                break;
              } // case 64
              case 74: {
                mount_ = input.readBytes();
                bitField0_ |= 0x00000100;
                break;
              } // case 74
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object storageUuid_ = "";
      /**
       * <code>required string storageUuid = 1 [deprecated = true];</code>
       * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated.
       *     See hdfs.proto;l=153
       * @return Whether the storageUuid field is set.
       */
      @java.lang.Deprecated public boolean hasStorageUuid() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required string storageUuid = 1 [deprecated = true];</code>
       * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated.
       *     See hdfs.proto;l=153
       * @return The storageUuid.
       */
      @java.lang.Deprecated public java.lang.String getStorageUuid() {
        java.lang.Object ref = storageUuid_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            storageUuid_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string storageUuid = 1 [deprecated = true];</code>
       * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated.
       *     See hdfs.proto;l=153
       * @return The bytes for storageUuid.
       */
      @java.lang.Deprecated public org.apache.hadoop.thirdparty.protobuf.ByteString
          getStorageUuidBytes() {
        java.lang.Object ref = storageUuid_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          storageUuid_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string storageUuid = 1 [deprecated = true];</code>
       * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated.
       *     See hdfs.proto;l=153
       * @param value The storageUuid to set.
       * @return This builder for chaining.
       */
      @java.lang.Deprecated public Builder setStorageUuid(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        storageUuid_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required string storageUuid = 1 [deprecated = true];</code>
       * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated.
       *     See hdfs.proto;l=153
       * @return This builder for chaining.
       */
      @java.lang.Deprecated public Builder clearStorageUuid() {
        storageUuid_ = getDefaultInstance().getStorageUuid();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>required string storageUuid = 1 [deprecated = true];</code>
       * @deprecated hadoop.hdfs.StorageReportProto.storageUuid is deprecated.
       *     See hdfs.proto;l=153
       * @param value The bytes for storageUuid to set.
       * @return This builder for chaining.
       */
      @java.lang.Deprecated public Builder setStorageUuidBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        storageUuid_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private boolean failed_ ;
      /**
       * <code>optional bool failed = 2 [default = false];</code>
       * @return Whether the failed field is set.
       */
      @java.lang.Override
      public boolean hasFailed() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional bool failed = 2 [default = false];</code>
       * @return The failed.
       */
      @java.lang.Override
      public boolean getFailed() {
        return failed_;
      }
      /**
       * <code>optional bool failed = 2 [default = false];</code>
       * @param value The failed to set.
       * @return This builder for chaining.
       */
      public Builder setFailed(boolean value) {

        failed_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional bool failed = 2 [default = false];</code>
       * @return This builder for chaining.
       */
      public Builder clearFailed() {
        bitField0_ = (bitField0_ & ~0x00000002);
        failed_ = false;
        onChanged();
        return this;
      }

      private long capacity_ ;
      /**
       * <code>optional uint64 capacity = 3 [default = 0];</code>
       * @return Whether the capacity field is set.
       */
      @java.lang.Override
      public boolean hasCapacity() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional uint64 capacity = 3 [default = 0];</code>
       * @return The capacity.
       */
      @java.lang.Override
      public long getCapacity() {
        return capacity_;
      }
      /**
       * <code>optional uint64 capacity = 3 [default = 0];</code>
       * @param value The capacity to set.
       * @return This builder for chaining.
       */
      public Builder setCapacity(long value) {

        capacity_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 capacity = 3 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearCapacity() {
        bitField0_ = (bitField0_ & ~0x00000004);
        capacity_ = 0L;
        onChanged();
        return this;
      }

      private long dfsUsed_ ;
      /**
       * <code>optional uint64 dfsUsed = 4 [default = 0];</code>
       * @return Whether the dfsUsed field is set.
       */
      @java.lang.Override
      public boolean hasDfsUsed() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional uint64 dfsUsed = 4 [default = 0];</code>
       * @return The dfsUsed.
       */
      @java.lang.Override
      public long getDfsUsed() {
        return dfsUsed_;
      }
      /**
       * <code>optional uint64 dfsUsed = 4 [default = 0];</code>
       * @param value The dfsUsed to set.
       * @return This builder for chaining.
       */
      public Builder setDfsUsed(long value) {

        dfsUsed_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 dfsUsed = 4 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearDfsUsed() {
        bitField0_ = (bitField0_ & ~0x00000008);
        dfsUsed_ = 0L;
        onChanged();
        return this;
      }

      private long remaining_ ;
      /**
       * <code>optional uint64 remaining = 5 [default = 0];</code>
       * @return Whether the remaining field is set.
       */
      @java.lang.Override
      public boolean hasRemaining() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional uint64 remaining = 5 [default = 0];</code>
       * @return The remaining.
       */
      @java.lang.Override
      public long getRemaining() {
        return remaining_;
      }
      /**
       * <code>optional uint64 remaining = 5 [default = 0];</code>
       * @param value The remaining to set.
       * @return This builder for chaining.
       */
      public Builder setRemaining(long value) {

        remaining_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 remaining = 5 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearRemaining() {
        bitField0_ = (bitField0_ & ~0x00000010);
        remaining_ = 0L;
        onChanged();
        return this;
      }

      private long blockPoolUsed_ ;
      /**
       * <code>optional uint64 blockPoolUsed = 6 [default = 0];</code>
       * @return Whether the blockPoolUsed field is set.
       */
      @java.lang.Override
      public boolean hasBlockPoolUsed() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>optional uint64 blockPoolUsed = 6 [default = 0];</code>
       * @return The blockPoolUsed.
       */
      @java.lang.Override
      public long getBlockPoolUsed() {
        return blockPoolUsed_;
      }
      /**
       * <code>optional uint64 blockPoolUsed = 6 [default = 0];</code>
       * @param value The blockPoolUsed to set.
       * @return This builder for chaining.
       */
      public Builder setBlockPoolUsed(long value) {

        blockPoolUsed_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 blockPoolUsed = 6 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearBlockPoolUsed() {
        bitField0_ = (bitField0_ & ~0x00000020);
        blockPoolUsed_ = 0L;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto storage_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder> storageBuilder_;
      /**
       * <pre>
       * supersedes StorageUuid
       * </pre>
       *
       * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 7;</code>
       * @return Whether the storage field is set.
       */
      public boolean hasStorage() {
        return ((bitField0_ & 0x00000040) != 0);
      }
      /**
       * <pre>
       * supersedes StorageUuid
       * </pre>
       *
       * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 7;</code>
       * @return The storage.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto getStorage() {
        if (storageBuilder_ == null) {
          return storage_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance() : storage_;
        } else {
          return storageBuilder_.getMessage();
        }
      }
      /**
       * <pre>
       * supersedes StorageUuid
       * </pre>
       *
       * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 7;</code>
       */
      public Builder setStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto value) {
        if (storageBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          storage_ = value;
        } else {
          storageBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * supersedes StorageUuid
       * </pre>
       *
       * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 7;</code>
       */
      public Builder setStorage(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder builderForValue) {
        if (storageBuilder_ == null) {
          storage_ = builderForValue.build();
        } else {
          storageBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * supersedes StorageUuid
       * </pre>
       *
       * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 7;</code>
       */
      public Builder mergeStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto value) {
        if (storageBuilder_ == null) {
          if (((bitField0_ & 0x00000040) != 0) &&
            storage_ != null &&
            storage_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance()) {
            getStorageBuilder().mergeFrom(value);
          } else {
            storage_ = value;
          }
        } else {
          storageBuilder_.mergeFrom(value);
        }
        if (storage_ != null) {
          bitField0_ |= 0x00000040;
          onChanged();
        }
        return this;
      }
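
      /*
       * Merge semantics of mergeStorage(...) above: if the storage field is
       * already set to a non-default value, the incoming message's fields are
       * merged into it; otherwise the incoming message simply replaces it.
       * A minimal sketch with illustrative values (assuming the usual
       * generated setStorageUuid(...) setter on DatanodeStorageProto.Builder):
       *
       * <pre>{@code
       * StorageReportProto.Builder b = StorageReportProto.newBuilder()
       *     .setStorage(DatanodeStorageProto.newBuilder()
       *         .setStorageUuid("uuid-0").build());
       * // Merging another DatanodeStorageProto overwrites only the fields it sets.
       * b.mergeStorage(DatanodeStorageProto.newBuilder()
       *     .setStorageUuid("uuid-1").build());
       * }</pre>
       */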
      /**
       * <pre>
       * supersedes StorageUuid
       * </pre>
       *
       * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 7;</code>
       */
      public Builder clearStorage() {
        bitField0_ = (bitField0_ & ~0x00000040);
        storage_ = null;
        if (storageBuilder_ != null) {
          storageBuilder_.dispose();
          storageBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <pre>
       * supersedes StorageUuid
       * </pre>
       *
       * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 7;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder getStorageBuilder() {
        bitField0_ |= 0x00000040;
        onChanged();
        return getStorageFieldBuilder().getBuilder();
      }
      /**
       * <pre>
       * supersedes StorageUuid
       * </pre>
       *
       * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 7;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder getStorageOrBuilder() {
        if (storageBuilder_ != null) {
          return storageBuilder_.getMessageOrBuilder();
        } else {
          return storage_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.getDefaultInstance() : storage_;
        }
      }
      /**
       * <pre>
       * supersedes StorageUuid
       * </pre>
       *
       * <code>optional .hadoop.hdfs.DatanodeStorageProto storage = 7;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder> 
          getStorageFieldBuilder() {
        if (storageBuilder_ == null) {
          storageBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProtoOrBuilder>(
                  getStorage(),
                  getParentForChildren(),
                  isClean());
          storage_ = null;
        }
        return storageBuilder_;
      }

      private long nonDfsUsed_ ;
      /**
       * <code>optional uint64 nonDfsUsed = 8;</code>
       * @return Whether the nonDfsUsed field is set.
       */
      @java.lang.Override
      public boolean hasNonDfsUsed() {
        return ((bitField0_ & 0x00000080) != 0);
      }
      /**
       * <code>optional uint64 nonDfsUsed = 8;</code>
       * @return The nonDfsUsed.
       */
      @java.lang.Override
      public long getNonDfsUsed() {
        return nonDfsUsed_;
      }
      /**
       * <code>optional uint64 nonDfsUsed = 8;</code>
       * @param value The nonDfsUsed to set.
       * @return This builder for chaining.
       */
      public Builder setNonDfsUsed(long value) {

        nonDfsUsed_ = value;
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 nonDfsUsed = 8;</code>
       * @return This builder for chaining.
       */
      public Builder clearNonDfsUsed() {
        bitField0_ = (bitField0_ & ~0x00000080);
        nonDfsUsed_ = 0L;
        onChanged();
        return this;
      }

      private java.lang.Object mount_ = "";
      /**
       * <code>optional string mount = 9;</code>
       * @return Whether the mount field is set.
       */
      public boolean hasMount() {
        return ((bitField0_ & 0x00000100) != 0);
      }
      /**
       * <code>optional string mount = 9;</code>
       * @return The mount.
       */
      public java.lang.String getMount() {
        java.lang.Object ref = mount_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            mount_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string mount = 9;</code>
       * @return The bytes for mount.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getMountBytes() {
        java.lang.Object ref = mount_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          mount_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string mount = 9;</code>
       * @param value The mount to set.
       * @return This builder for chaining.
       */
      public Builder setMount(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        mount_ = value;
        bitField0_ |= 0x00000100;
        onChanged();
        return this;
      }
      /**
       * <code>optional string mount = 9;</code>
       * @return This builder for chaining.
       */
      public Builder clearMount() {
        mount_ = getDefaultInstance().getMount();
        bitField0_ = (bitField0_ & ~0x00000100);
        onChanged();
        return this;
      }
      /**
       * <code>optional string mount = 9;</code>
       * @param value The bytes for mount to set.
       * @return This builder for chaining.
       */
      public Builder setMountBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        mount_ = value;
        bitField0_ |= 0x00000100;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageReportProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageReportProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<StorageReportProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<StorageReportProto>() {
      @java.lang.Override
      public StorageReportProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<StorageReportProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<StorageReportProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
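
  /*
   * Usage sketch for StorageReportProto (illustrative values only): build a
   * report with the generated Builder, serialize it, and parse it back with
   * the static parseFrom overloads defined in the class above.
   *
   * <pre>{@code
   * StorageReportProto report = StorageReportProto.newBuilder()
   *     .setStorageUuid("uuid-0")       // required (deprecated in favor of storage)
   *     .setCapacity(4L * 1024 * 1024)
   *     .setDfsUsed(1024L)
   *     .setRemaining(3L * 1024 * 1024)
   *     .setFailed(false)
   *     .build();
   * byte[] bytes = report.toByteArray();
   * StorageReportProto parsed = StorageReportProto.parseFrom(bytes);
   * }</pre>
   */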

  public interface ContentSummaryProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ContentSummaryProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required uint64 length = 1;</code>
     * @return Whether the length field is set.
     */
    boolean hasLength();
    /**
     * <code>required uint64 length = 1;</code>
     * @return The length.
     */
    long getLength();

    /**
     * <code>required uint64 fileCount = 2;</code>
     * @return Whether the fileCount field is set.
     */
    boolean hasFileCount();
    /**
     * <code>required uint64 fileCount = 2;</code>
     * @return The fileCount.
     */
    long getFileCount();

    /**
     * <code>required uint64 directoryCount = 3;</code>
     * @return Whether the directoryCount field is set.
     */
    boolean hasDirectoryCount();
    /**
     * <code>required uint64 directoryCount = 3;</code>
     * @return The directoryCount.
     */
    long getDirectoryCount();

    /**
     * <code>required uint64 quota = 4;</code>
     * @return Whether the quota field is set.
     */
    boolean hasQuota();
    /**
     * <code>required uint64 quota = 4;</code>
     * @return The quota.
     */
    long getQuota();

    /**
     * <code>required uint64 spaceConsumed = 5;</code>
     * @return Whether the spaceConsumed field is set.
     */
    boolean hasSpaceConsumed();
    /**
     * <code>required uint64 spaceConsumed = 5;</code>
     * @return The spaceConsumed.
     */
    long getSpaceConsumed();

    /**
     * <code>required uint64 spaceQuota = 6;</code>
     * @return Whether the spaceQuota field is set.
     */
    boolean hasSpaceQuota();
    /**
     * <code>required uint64 spaceQuota = 6;</code>
     * @return The spaceQuota.
     */
    long getSpaceQuota();

    /**
     * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;</code>
     * @return Whether the typeQuotaInfos field is set.
     */
    boolean hasTypeQuotaInfos();
    /**
     * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;</code>
     * @return The typeQuotaInfos.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos();
    /**
     * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder();

    /**
     * <code>optional uint64 snapshotLength = 8;</code>
     * @return Whether the snapshotLength field is set.
     */
    boolean hasSnapshotLength();
    /**
     * <code>optional uint64 snapshotLength = 8;</code>
     * @return The snapshotLength.
     */
    long getSnapshotLength();

    /**
     * <code>optional uint64 snapshotFileCount = 9;</code>
     * @return Whether the snapshotFileCount field is set.
     */
    boolean hasSnapshotFileCount();
    /**
     * <code>optional uint64 snapshotFileCount = 9;</code>
     * @return The snapshotFileCount.
     */
    long getSnapshotFileCount();

    /**
     * <code>optional uint64 snapshotDirectoryCount = 10;</code>
     * @return Whether the snapshotDirectoryCount field is set.
     */
    boolean hasSnapshotDirectoryCount();
    /**
     * <code>optional uint64 snapshotDirectoryCount = 10;</code>
     * @return The snapshotDirectoryCount.
     */
    long getSnapshotDirectoryCount();

    /**
     * <code>optional uint64 snapshotSpaceConsumed = 11;</code>
     * @return Whether the snapshotSpaceConsumed field is set.
     */
    boolean hasSnapshotSpaceConsumed();
    /**
     * <code>optional uint64 snapshotSpaceConsumed = 11;</code>
     * @return The snapshotSpaceConsumed.
     */
    long getSnapshotSpaceConsumed();

    /**
     * <code>optional string erasureCodingPolicy = 12;</code>
     * @return Whether the erasureCodingPolicy field is set.
     */
    boolean hasErasureCodingPolicy();
    /**
     * <code>optional string erasureCodingPolicy = 12;</code>
     * @return The erasureCodingPolicy.
     */
    java.lang.String getErasureCodingPolicy();
    /**
     * <code>optional string erasureCodingPolicy = 12;</code>
     * @return The bytes for erasureCodingPolicy.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getErasureCodingPolicyBytes();
  }
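
  /*
   * A small sketch of reading through the OrBuilder view declared above
   * (hypothetical helper; it accepts both a ContentSummaryProto and its
   * Builder, since both implement the interface):
   *
   * <pre>{@code
   * static long remainingSpaceQuota(ContentSummaryProtoOrBuilder summary) {
   *   // spaceQuota and spaceConsumed are required fields, so they are always present.
   *   return summary.getSpaceQuota() - summary.getSpaceConsumed();
   * }
   * }</pre>
   */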
  /**
   * <pre>
   **
   * Summary of a file or directory
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.ContentSummaryProto}
   */
  public static final class ContentSummaryProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.ContentSummaryProto)
      ContentSummaryProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use ContentSummaryProto.newBuilder() to construct.
    private ContentSummaryProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ContentSummaryProto() {
      erasureCodingPolicy_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ContentSummaryProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder.class);
    }

    private int bitField0_;
    public static final int LENGTH_FIELD_NUMBER = 1;
    private long length_ = 0L;
    /**
     * <code>required uint64 length = 1;</code>
     * @return Whether the length field is set.
     */
    @java.lang.Override
    public boolean hasLength() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required uint64 length = 1;</code>
     * @return The length.
     */
    @java.lang.Override
    public long getLength() {
      return length_;
    }

    public static final int FILECOUNT_FIELD_NUMBER = 2;
    private long fileCount_ = 0L;
    /**
     * <code>required uint64 fileCount = 2;</code>
     * @return Whether the fileCount field is set.
     */
    @java.lang.Override
    public boolean hasFileCount() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required uint64 fileCount = 2;</code>
     * @return The fileCount.
     */
    @java.lang.Override
    public long getFileCount() {
      return fileCount_;
    }

    public static final int DIRECTORYCOUNT_FIELD_NUMBER = 3;
    private long directoryCount_ = 0L;
    /**
     * <code>required uint64 directoryCount = 3;</code>
     * @return Whether the directoryCount field is set.
     */
    @java.lang.Override
    public boolean hasDirectoryCount() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required uint64 directoryCount = 3;</code>
     * @return The directoryCount.
     */
    @java.lang.Override
    public long getDirectoryCount() {
      return directoryCount_;
    }

    public static final int QUOTA_FIELD_NUMBER = 4;
    private long quota_ = 0L;
    /**
     * <code>required uint64 quota = 4;</code>
     * @return Whether the quota field is set.
     */
    @java.lang.Override
    public boolean hasQuota() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>required uint64 quota = 4;</code>
     * @return The quota.
     */
    @java.lang.Override
    public long getQuota() {
      return quota_;
    }

    public static final int SPACECONSUMED_FIELD_NUMBER = 5;
    private long spaceConsumed_ = 0L;
    /**
     * <code>required uint64 spaceConsumed = 5;</code>
     * @return Whether the spaceConsumed field is set.
     */
    @java.lang.Override
    public boolean hasSpaceConsumed() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>required uint64 spaceConsumed = 5;</code>
     * @return The spaceConsumed.
     */
    @java.lang.Override
    public long getSpaceConsumed() {
      return spaceConsumed_;
    }

    public static final int SPACEQUOTA_FIELD_NUMBER = 6;
    private long spaceQuota_ = 0L;
    /**
     * <code>required uint64 spaceQuota = 6;</code>
     * @return Whether the spaceQuota field is set.
     */
    @java.lang.Override
    public boolean hasSpaceQuota() {
      return ((bitField0_ & 0x00000020) != 0);
    }
    /**
     * <code>required uint64 spaceQuota = 6;</code>
     * @return The spaceQuota.
     */
    @java.lang.Override
    public long getSpaceQuota() {
      return spaceQuota_;
    }

    public static final int TYPEQUOTAINFOS_FIELD_NUMBER = 7;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos_;
    /**
     * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;</code>
     * @return Whether the typeQuotaInfos field is set.
     */
    @java.lang.Override
    public boolean hasTypeQuotaInfos() {
      return ((bitField0_ & 0x00000040) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;</code>
     * @return The typeQuotaInfos.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos() {
      return typeQuotaInfos_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_;
    }
    /**
     * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder() {
      return typeQuotaInfos_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_;
    }

    public static final int SNAPSHOTLENGTH_FIELD_NUMBER = 8;
    private long snapshotLength_ = 0L;
    /**
     * <code>optional uint64 snapshotLength = 8;</code>
     * @return Whether the snapshotLength field is set.
     */
    @java.lang.Override
    public boolean hasSnapshotLength() {
      return ((bitField0_ & 0x00000080) != 0);
    }
    /**
     * <code>optional uint64 snapshotLength = 8;</code>
     * @return The snapshotLength.
     */
    @java.lang.Override
    public long getSnapshotLength() {
      return snapshotLength_;
    }

    public static final int SNAPSHOTFILECOUNT_FIELD_NUMBER = 9;
    private long snapshotFileCount_ = 0L;
    /**
     * <code>optional uint64 snapshotFileCount = 9;</code>
     * @return Whether the snapshotFileCount field is set.
     */
    @java.lang.Override
    public boolean hasSnapshotFileCount() {
      return ((bitField0_ & 0x00000100) != 0);
    }
    /**
     * <code>optional uint64 snapshotFileCount = 9;</code>
     * @return The snapshotFileCount.
     */
    @java.lang.Override
    public long getSnapshotFileCount() {
      return snapshotFileCount_;
    }

    public static final int SNAPSHOTDIRECTORYCOUNT_FIELD_NUMBER = 10;
    private long snapshotDirectoryCount_ = 0L;
    /**
     * <code>optional uint64 snapshotDirectoryCount = 10;</code>
     * @return Whether the snapshotDirectoryCount field is set.
     */
    @java.lang.Override
    public boolean hasSnapshotDirectoryCount() {
      return ((bitField0_ & 0x00000200) != 0);
    }
    /**
     * <code>optional uint64 snapshotDirectoryCount = 10;</code>
     * @return The snapshotDirectoryCount.
     */
    @java.lang.Override
    public long getSnapshotDirectoryCount() {
      return snapshotDirectoryCount_;
    }

    public static final int SNAPSHOTSPACECONSUMED_FIELD_NUMBER = 11;
    private long snapshotSpaceConsumed_ = 0L;
    /**
     * <code>optional uint64 snapshotSpaceConsumed = 11;</code>
     * @return Whether the snapshotSpaceConsumed field is set.
     */
    @java.lang.Override
    public boolean hasSnapshotSpaceConsumed() {
      return ((bitField0_ & 0x00000400) != 0);
    }
    /**
     * <code>optional uint64 snapshotSpaceConsumed = 11;</code>
     * @return The snapshotSpaceConsumed.
     */
    @java.lang.Override
    public long getSnapshotSpaceConsumed() {
      return snapshotSpaceConsumed_;
    }

    public static final int ERASURECODINGPOLICY_FIELD_NUMBER = 12;
    @SuppressWarnings("serial")
    private volatile java.lang.Object erasureCodingPolicy_ = "";
    /**
     * <code>optional string erasureCodingPolicy = 12;</code>
     * @return Whether the erasureCodingPolicy field is set.
     */
    @java.lang.Override
    public boolean hasErasureCodingPolicy() {
      return ((bitField0_ & 0x00000800) != 0);
    }
    /**
     * <code>optional string erasureCodingPolicy = 12;</code>
     * @return The erasureCodingPolicy.
     */
    @java.lang.Override
    public java.lang.String getErasureCodingPolicy() {
      java.lang.Object ref = erasureCodingPolicy_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          erasureCodingPolicy_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string erasureCodingPolicy = 12;</code>
     * @return The bytes for erasureCodingPolicy.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getErasureCodingPolicyBytes() {
      java.lang.Object ref = erasureCodingPolicy_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        erasureCodingPolicy_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }
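
    // The string field above is stored as an Object that may hold either a java.lang.String
    // or a ByteString. getErasureCodingPolicy() decodes the ByteString lazily and caches the
    // decoded String only when the bytes are valid UTF-8; getErasureCodingPolicyBytes() does
    // the reverse conversion and caches the ByteString form.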

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasLength()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasFileCount()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasDirectoryCount()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasQuota()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSpaceConsumed()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSpaceQuota()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (hasTypeQuotaInfos()) {
        if (!getTypeQuotaInfos().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }
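
    // memoizedIsInitialized caches the result of isInitialized(): -1 means not yet computed,
    // 0 means false, 1 means true. The six proto2 'required' fields must all be present, and
    // the optional typeQuotaInfos message, if set, must itself be initialized.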

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt64(1, length_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(2, fileCount_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeUInt64(3, directoryCount_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeUInt64(4, quota_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeUInt64(5, spaceConsumed_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        output.writeUInt64(6, spaceQuota_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        output.writeMessage(7, getTypeQuotaInfos());
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        output.writeUInt64(8, snapshotLength_);
      }
      if (((bitField0_ & 0x00000100) != 0)) {
        output.writeUInt64(9, snapshotFileCount_);
      }
      if (((bitField0_ & 0x00000200) != 0)) {
        output.writeUInt64(10, snapshotDirectoryCount_);
      }
      if (((bitField0_ & 0x00000400) != 0)) {
        output.writeUInt64(11, snapshotSpaceConsumed_);
      }
      if (((bitField0_ & 0x00000800) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 12, erasureCodingPolicy_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(1, length_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(2, fileCount_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(3, directoryCount_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(4, quota_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(5, spaceConsumed_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(6, spaceQuota_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(7, getTypeQuotaInfos());
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(8, snapshotLength_);
      }
      if (((bitField0_ & 0x00000100) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(9, snapshotFileCount_);
      }
      if (((bitField0_ & 0x00000200) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(10, snapshotDirectoryCount_);
      }
      if (((bitField0_ & 0x00000400) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(11, snapshotSpaceConsumed_);
      }
      if (((bitField0_ & 0x00000800) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(12, erasureCodingPolicy_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }
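
    // getSerializedSize() adds the encoded size of each field whose presence bit is set and
    // caches the total in memoizedSize, so repeated size queries are cheap.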

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) obj;

      if (hasLength() != other.hasLength()) return false;
      if (hasLength()) {
        if (getLength()
            != other.getLength()) return false;
      }
      if (hasFileCount() != other.hasFileCount()) return false;
      if (hasFileCount()) {
        if (getFileCount()
            != other.getFileCount()) return false;
      }
      if (hasDirectoryCount() != other.hasDirectoryCount()) return false;
      if (hasDirectoryCount()) {
        if (getDirectoryCount()
            != other.getDirectoryCount()) return false;
      }
      if (hasQuota() != other.hasQuota()) return false;
      if (hasQuota()) {
        if (getQuota()
            != other.getQuota()) return false;
      }
      if (hasSpaceConsumed() != other.hasSpaceConsumed()) return false;
      if (hasSpaceConsumed()) {
        if (getSpaceConsumed()
            != other.getSpaceConsumed()) return false;
      }
      if (hasSpaceQuota() != other.hasSpaceQuota()) return false;
      if (hasSpaceQuota()) {
        if (getSpaceQuota()
            != other.getSpaceQuota()) return false;
      }
      if (hasTypeQuotaInfos() != other.hasTypeQuotaInfos()) return false;
      if (hasTypeQuotaInfos()) {
        if (!getTypeQuotaInfos()
            .equals(other.getTypeQuotaInfos())) return false;
      }
      if (hasSnapshotLength() != other.hasSnapshotLength()) return false;
      if (hasSnapshotLength()) {
        if (getSnapshotLength()
            != other.getSnapshotLength()) return false;
      }
      if (hasSnapshotFileCount() != other.hasSnapshotFileCount()) return false;
      if (hasSnapshotFileCount()) {
        if (getSnapshotFileCount()
            != other.getSnapshotFileCount()) return false;
      }
      if (hasSnapshotDirectoryCount() != other.hasSnapshotDirectoryCount()) return false;
      if (hasSnapshotDirectoryCount()) {
        if (getSnapshotDirectoryCount()
            != other.getSnapshotDirectoryCount()) return false;
      }
      if (hasSnapshotSpaceConsumed() != other.hasSnapshotSpaceConsumed()) return false;
      if (hasSnapshotSpaceConsumed()) {
        if (getSnapshotSpaceConsumed()
            != other.getSnapshotSpaceConsumed()) return false;
      }
      if (hasErasureCodingPolicy() != other.hasErasureCodingPolicy()) return false;
      if (hasErasureCodingPolicy()) {
        if (!getErasureCodingPolicy()
            .equals(other.getErasureCodingPolicy())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasLength()) {
        hash = (37 * hash) + LENGTH_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getLength());
      }
      if (hasFileCount()) {
        hash = (37 * hash) + FILECOUNT_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getFileCount());
      }
      if (hasDirectoryCount()) {
        hash = (37 * hash) + DIRECTORYCOUNT_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getDirectoryCount());
      }
      if (hasQuota()) {
        hash = (37 * hash) + QUOTA_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getQuota());
      }
      if (hasSpaceConsumed()) {
        hash = (37 * hash) + SPACECONSUMED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getSpaceConsumed());
      }
      if (hasSpaceQuota()) {
        hash = (37 * hash) + SPACEQUOTA_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getSpaceQuota());
      }
      if (hasTypeQuotaInfos()) {
        hash = (37 * hash) + TYPEQUOTAINFOS_FIELD_NUMBER;
        hash = (53 * hash) + getTypeQuotaInfos().hashCode();
      }
      if (hasSnapshotLength()) {
        hash = (37 * hash) + SNAPSHOTLENGTH_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getSnapshotLength());
      }
      if (hasSnapshotFileCount()) {
        hash = (37 * hash) + SNAPSHOTFILECOUNT_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getSnapshotFileCount());
      }
      if (hasSnapshotDirectoryCount()) {
        hash = (37 * hash) + SNAPSHOTDIRECTORYCOUNT_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getSnapshotDirectoryCount());
      }
      if (hasSnapshotSpaceConsumed()) {
        hash = (37 * hash) + SNAPSHOTSPACECONSUMED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getSnapshotSpaceConsumed());
      }
      if (hasErasureCodingPolicy()) {
        hash = (37 * hash) + ERASURECODINGPOLICY_FIELD_NUMBER;
        hash = (53 * hash) + getErasureCodingPolicy().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Summary of a file or directory
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.ContentSummaryProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ContentSummaryProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getTypeQuotaInfosFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        length_ = 0L;
        fileCount_ = 0L;
        directoryCount_ = 0L;
        quota_ = 0L;
        spaceConsumed_ = 0L;
        spaceQuota_ = 0L;
        typeQuotaInfos_ = null;
        if (typeQuotaInfosBuilder_ != null) {
          typeQuotaInfosBuilder_.dispose();
          typeQuotaInfosBuilder_ = null;
        }
        snapshotLength_ = 0L;
        snapshotFileCount_ = 0L;
        snapshotDirectoryCount_ = 0L;
        snapshotSpaceConsumed_ = 0L;
        erasureCodingPolicy_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ContentSummaryProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.length_ = length_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.fileCount_ = fileCount_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.directoryCount_ = directoryCount_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.quota_ = quota_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.spaceConsumed_ = spaceConsumed_;
          to_bitField0_ |= 0x00000010;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.spaceQuota_ = spaceQuota_;
          to_bitField0_ |= 0x00000020;
        }
        if (((from_bitField0_ & 0x00000040) != 0)) {
          result.typeQuotaInfos_ = typeQuotaInfosBuilder_ == null
              ? typeQuotaInfos_
              : typeQuotaInfosBuilder_.build();
          to_bitField0_ |= 0x00000040;
        }
        if (((from_bitField0_ & 0x00000080) != 0)) {
          result.snapshotLength_ = snapshotLength_;
          to_bitField0_ |= 0x00000080;
        }
        if (((from_bitField0_ & 0x00000100) != 0)) {
          result.snapshotFileCount_ = snapshotFileCount_;
          to_bitField0_ |= 0x00000100;
        }
        if (((from_bitField0_ & 0x00000200) != 0)) {
          result.snapshotDirectoryCount_ = snapshotDirectoryCount_;
          to_bitField0_ |= 0x00000200;
        }
        if (((from_bitField0_ & 0x00000400) != 0)) {
          result.snapshotSpaceConsumed_ = snapshotSpaceConsumed_;
          to_bitField0_ |= 0x00000400;
        }
        if (((from_bitField0_ & 0x00000800) != 0)) {
          result.erasureCodingPolicy_ = erasureCodingPolicy_;
          to_bitField0_ |= 0x00000800;
        }
        result.bitField0_ |= to_bitField0_;
      }
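
      // buildPartial0() copies each builder field whose local has-bit is set into the new
      // message and accumulates the corresponding bits into the message's own bitField0_,
      // so field presence survives the builder-to-message transfer.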

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto.getDefaultInstance()) return this;
        if (other.hasLength()) {
          setLength(other.getLength());
        }
        if (other.hasFileCount()) {
          setFileCount(other.getFileCount());
        }
        if (other.hasDirectoryCount()) {
          setDirectoryCount(other.getDirectoryCount());
        }
        if (other.hasQuota()) {
          setQuota(other.getQuota());
        }
        if (other.hasSpaceConsumed()) {
          setSpaceConsumed(other.getSpaceConsumed());
        }
        if (other.hasSpaceQuota()) {
          setSpaceQuota(other.getSpaceQuota());
        }
        if (other.hasTypeQuotaInfos()) {
          mergeTypeQuotaInfos(other.getTypeQuotaInfos());
        }
        if (other.hasSnapshotLength()) {
          setSnapshotLength(other.getSnapshotLength());
        }
        if (other.hasSnapshotFileCount()) {
          setSnapshotFileCount(other.getSnapshotFileCount());
        }
        if (other.hasSnapshotDirectoryCount()) {
          setSnapshotDirectoryCount(other.getSnapshotDirectoryCount());
        }
        if (other.hasSnapshotSpaceConsumed()) {
          setSnapshotSpaceConsumed(other.getSnapshotSpaceConsumed());
        }
        if (other.hasErasureCodingPolicy()) {
          erasureCodingPolicy_ = other.erasureCodingPolicy_;
          bitField0_ |= 0x00000800;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasLength()) {
          return false;
        }
        if (!hasFileCount()) {
          return false;
        }
        if (!hasDirectoryCount()) {
          return false;
        }
        if (!hasQuota()) {
          return false;
        }
        if (!hasSpaceConsumed()) {
          return false;
        }
        if (!hasSpaceQuota()) {
          return false;
        }
        if (hasTypeQuotaInfos()) {
          if (!getTypeQuotaInfos().isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                length_ = input.readUInt64();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 16: {
                fileCount_ = input.readUInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                directoryCount_ = input.readUInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 32: {
                quota_ = input.readUInt64();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
              case 40: {
                spaceConsumed_ = input.readUInt64();
                bitField0_ |= 0x00000010;
                break;
              } // case 40
              case 48: {
                spaceQuota_ = input.readUInt64();
                bitField0_ |= 0x00000020;
                break;
              } // case 48
              case 58: {
                input.readMessage(
                    getTypeQuotaInfosFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000040;
                break;
              } // case 58
              case 64: {
                snapshotLength_ = input.readUInt64();
                bitField0_ |= 0x00000080;
                break;
              } // case 64
              case 72: {
                snapshotFileCount_ = input.readUInt64();
                bitField0_ |= 0x00000100;
                break;
              } // case 72
              case 80: {
                snapshotDirectoryCount_ = input.readUInt64();
                bitField0_ |= 0x00000200;
                break;
              } // case 80
              case 88: {
                snapshotSpaceConsumed_ = input.readUInt64();
                bitField0_ |= 0x00000400;
                break;
              } // case 88
              case 98: {
                erasureCodingPolicy_ = input.readBytes();
                bitField0_ |= 0x00000800;
                break;
              } // case 98
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
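
      // Each case label above is a protobuf wire tag: (field_number << 3) | wire_type.
      // For example, case 8 is field 1 as a varint, case 58 is field 7 as a length-delimited
      // message, and case 98 is field 12 as a length-delimited string.
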
      private int bitField0_;

      private long length_ ;
      /**
       * <code>required uint64 length = 1;</code>
       * @return Whether the length field is set.
       */
      @java.lang.Override
      public boolean hasLength() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required uint64 length = 1;</code>
       * @return The length.
       */
      @java.lang.Override
      public long getLength() {
        return length_;
      }
      /**
       * <code>required uint64 length = 1;</code>
       * @param value The length to set.
       * @return This builder for chaining.
       */
      public Builder setLength(long value) {

        length_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 length = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearLength() {
        bitField0_ = (bitField0_ & ~0x00000001);
        length_ = 0L;
        onChanged();
        return this;
      }

      private long fileCount_ ;
      /**
       * <code>required uint64 fileCount = 2;</code>
       * @return Whether the fileCount field is set.
       */
      @java.lang.Override
      public boolean hasFileCount() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required uint64 fileCount = 2;</code>
       * @return The fileCount.
       */
      @java.lang.Override
      public long getFileCount() {
        return fileCount_;
      }
      /**
       * <code>required uint64 fileCount = 2;</code>
       * @param value The fileCount to set.
       * @return This builder for chaining.
       */
      public Builder setFileCount(long value) {

        fileCount_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 fileCount = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearFileCount() {
        bitField0_ = (bitField0_ & ~0x00000002);
        fileCount_ = 0L;
        onChanged();
        return this;
      }

      private long directoryCount_ ;
      /**
       * <code>required uint64 directoryCount = 3;</code>
       * @return Whether the directoryCount field is set.
       */
      @java.lang.Override
      public boolean hasDirectoryCount() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required uint64 directoryCount = 3;</code>
       * @return The directoryCount.
       */
      @java.lang.Override
      public long getDirectoryCount() {
        return directoryCount_;
      }
      /**
       * <code>required uint64 directoryCount = 3;</code>
       * @param value The directoryCount to set.
       * @return This builder for chaining.
       */
      public Builder setDirectoryCount(long value) {

        directoryCount_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 directoryCount = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearDirectoryCount() {
        bitField0_ = (bitField0_ & ~0x00000004);
        directoryCount_ = 0L;
        onChanged();
        return this;
      }

      private long quota_ ;
      /**
       * <code>required uint64 quota = 4;</code>
       * @return Whether the quota field is set.
       */
      @java.lang.Override
      public boolean hasQuota() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>required uint64 quota = 4;</code>
       * @return The quota.
       */
      @java.lang.Override
      public long getQuota() {
        return quota_;
      }
      /**
       * <code>required uint64 quota = 4;</code>
       * @param value The quota to set.
       * @return This builder for chaining.
       */
      public Builder setQuota(long value) {

        quota_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 quota = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearQuota() {
        bitField0_ = (bitField0_ & ~0x00000008);
        quota_ = 0L;
        onChanged();
        return this;
      }

      private long spaceConsumed_ ;
      /**
       * <code>required uint64 spaceConsumed = 5;</code>
       * @return Whether the spaceConsumed field is set.
       */
      @java.lang.Override
      public boolean hasSpaceConsumed() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>required uint64 spaceConsumed = 5;</code>
       * @return The spaceConsumed.
       */
      @java.lang.Override
      public long getSpaceConsumed() {
        return spaceConsumed_;
      }
      /**
       * <code>required uint64 spaceConsumed = 5;</code>
       * @param value The spaceConsumed to set.
       * @return This builder for chaining.
       */
      public Builder setSpaceConsumed(long value) {

        spaceConsumed_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 spaceConsumed = 5;</code>
       * @return This builder for chaining.
       */
      public Builder clearSpaceConsumed() {
        bitField0_ = (bitField0_ & ~0x00000010);
        spaceConsumed_ = 0L;
        onChanged();
        return this;
      }

      private long spaceQuota_ ;
      /**
       * <code>required uint64 spaceQuota = 6;</code>
       * @return Whether the spaceQuota field is set.
       */
      @java.lang.Override
      public boolean hasSpaceQuota() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>required uint64 spaceQuota = 6;</code>
       * @return The spaceQuota.
       */
      @java.lang.Override
      public long getSpaceQuota() {
        return spaceQuota_;
      }
      /**
       * <code>required uint64 spaceQuota = 6;</code>
       * @param value The spaceQuota to set.
       * @return This builder for chaining.
       */
      public Builder setSpaceQuota(long value) {

        spaceQuota_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 spaceQuota = 6;</code>
       * @return This builder for chaining.
       */
      public Builder clearSpaceQuota() {
        bitField0_ = (bitField0_ & ~0x00000020);
        spaceQuota_ = 0L;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder> typeQuotaInfosBuilder_;
      /**
       * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;</code>
       * @return Whether the typeQuotaInfos field is set.
       */
      public boolean hasTypeQuotaInfos() {
        return ((bitField0_ & 0x00000040) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;</code>
       * @return The typeQuotaInfos.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos() {
        if (typeQuotaInfosBuilder_ == null) {
          return typeQuotaInfos_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_;
        } else {
          return typeQuotaInfosBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;</code>
       */
      public Builder setTypeQuotaInfos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto value) {
        if (typeQuotaInfosBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          typeQuotaInfos_ = value;
        } else {
          typeQuotaInfosBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;</code>
       */
      public Builder setTypeQuotaInfos(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder builderForValue) {
        if (typeQuotaInfosBuilder_ == null) {
          typeQuotaInfos_ = builderForValue.build();
        } else {
          typeQuotaInfosBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;</code>
       */
      public Builder mergeTypeQuotaInfos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto value) {
        if (typeQuotaInfosBuilder_ == null) {
          if (((bitField0_ & 0x00000040) != 0) &&
            typeQuotaInfos_ != null &&
            typeQuotaInfos_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance()) {
            getTypeQuotaInfosBuilder().mergeFrom(value);
          } else {
            typeQuotaInfos_ = value;
          }
        } else {
          typeQuotaInfosBuilder_.mergeFrom(value);
        }
        if (typeQuotaInfos_ != null) {
          bitField0_ |= 0x00000040;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;</code>
       */
      public Builder clearTypeQuotaInfos() {
        bitField0_ = (bitField0_ & ~0x00000040);
        typeQuotaInfos_ = null;
        if (typeQuotaInfosBuilder_ != null) {
          typeQuotaInfosBuilder_.dispose();
          typeQuotaInfosBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder getTypeQuotaInfosBuilder() {
        bitField0_ |= 0x00000040;
        onChanged();
        return getTypeQuotaInfosFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder() {
        if (typeQuotaInfosBuilder_ != null) {
          return typeQuotaInfosBuilder_.getMessageOrBuilder();
        } else {
          return typeQuotaInfos_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 7;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder> 
          getTypeQuotaInfosFieldBuilder() {
        if (typeQuotaInfosBuilder_ == null) {
          typeQuotaInfosBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder>(
                  getTypeQuotaInfos(),
                  getParentForChildren(),
                  isClean());
          typeQuotaInfos_ = null;
        }
        return typeQuotaInfosBuilder_;
      }
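
      // The SingleFieldBuilderV3 for typeQuotaInfos is created lazily on first use. Once it
      // exists, typeQuotaInfos_ is cleared and the nested builder becomes the single source
      // of truth for the field's value.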

      private long snapshotLength_ ;
      /**
       * <code>optional uint64 snapshotLength = 8;</code>
       * @return Whether the snapshotLength field is set.
       */
      @java.lang.Override
      public boolean hasSnapshotLength() {
        return ((bitField0_ & 0x00000080) != 0);
      }
      /**
       * <code>optional uint64 snapshotLength = 8;</code>
       * @return The snapshotLength.
       */
      @java.lang.Override
      public long getSnapshotLength() {
        return snapshotLength_;
      }
      /**
       * <code>optional uint64 snapshotLength = 8;</code>
       * @param value The snapshotLength to set.
       * @return This builder for chaining.
       */
      public Builder setSnapshotLength(long value) {

        snapshotLength_ = value;
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 snapshotLength = 8;</code>
       * @return This builder for chaining.
       */
      public Builder clearSnapshotLength() {
        bitField0_ = (bitField0_ & ~0x00000080);
        snapshotLength_ = 0L;
        onChanged();
        return this;
      }

      private long snapshotFileCount_ ;
      /**
       * <code>optional uint64 snapshotFileCount = 9;</code>
       * @return Whether the snapshotFileCount field is set.
       */
      @java.lang.Override
      public boolean hasSnapshotFileCount() {
        return ((bitField0_ & 0x00000100) != 0);
      }
      /**
       * <code>optional uint64 snapshotFileCount = 9;</code>
       * @return The snapshotFileCount.
       */
      @java.lang.Override
      public long getSnapshotFileCount() {
        return snapshotFileCount_;
      }
      /**
       * <code>optional uint64 snapshotFileCount = 9;</code>
       * @param value The snapshotFileCount to set.
       * @return This builder for chaining.
       */
      public Builder setSnapshotFileCount(long value) {

        snapshotFileCount_ = value;
        bitField0_ |= 0x00000100;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 snapshotFileCount = 9;</code>
       * @return This builder for chaining.
       */
      public Builder clearSnapshotFileCount() {
        bitField0_ = (bitField0_ & ~0x00000100);
        snapshotFileCount_ = 0L;
        onChanged();
        return this;
      }

      private long snapshotDirectoryCount_ ;
      /**
       * <code>optional uint64 snapshotDirectoryCount = 10;</code>
       * @return Whether the snapshotDirectoryCount field is set.
       */
      @java.lang.Override
      public boolean hasSnapshotDirectoryCount() {
        return ((bitField0_ & 0x00000200) != 0);
      }
      /**
       * <code>optional uint64 snapshotDirectoryCount = 10;</code>
       * @return The snapshotDirectoryCount.
       */
      @java.lang.Override
      public long getSnapshotDirectoryCount() {
        return snapshotDirectoryCount_;
      }
      /**
       * <code>optional uint64 snapshotDirectoryCount = 10;</code>
       * @param value The snapshotDirectoryCount to set.
       * @return This builder for chaining.
       */
      public Builder setSnapshotDirectoryCount(long value) {

        snapshotDirectoryCount_ = value;
        bitField0_ |= 0x00000200;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 snapshotDirectoryCount = 10;</code>
       * @return This builder for chaining.
       */
      public Builder clearSnapshotDirectoryCount() {
        bitField0_ = (bitField0_ & ~0x00000200);
        snapshotDirectoryCount_ = 0L;
        onChanged();
        return this;
      }

      private long snapshotSpaceConsumed_ ;
      /**
       * <code>optional uint64 snapshotSpaceConsumed = 11;</code>
       * @return Whether the snapshotSpaceConsumed field is set.
       */
      @java.lang.Override
      public boolean hasSnapshotSpaceConsumed() {
        return ((bitField0_ & 0x00000400) != 0);
      }
      /**
       * <code>optional uint64 snapshotSpaceConsumed = 11;</code>
       * @return The snapshotSpaceConsumed.
       */
      @java.lang.Override
      public long getSnapshotSpaceConsumed() {
        return snapshotSpaceConsumed_;
      }
      /**
       * <code>optional uint64 snapshotSpaceConsumed = 11;</code>
       * @param value The snapshotSpaceConsumed to set.
       * @return This builder for chaining.
       */
      public Builder setSnapshotSpaceConsumed(long value) {

        snapshotSpaceConsumed_ = value;
        bitField0_ |= 0x00000400;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 snapshotSpaceConsumed = 11;</code>
       * @return This builder for chaining.
       */
      public Builder clearSnapshotSpaceConsumed() {
        bitField0_ = (bitField0_ & ~0x00000400);
        snapshotSpaceConsumed_ = 0L;
        onChanged();
        return this;
      }

      private java.lang.Object erasureCodingPolicy_ = "";
      /**
       * <code>optional string erasureCodingPolicy = 12;</code>
       * @return Whether the erasureCodingPolicy field is set.
       */
      public boolean hasErasureCodingPolicy() {
        return ((bitField0_ & 0x00000800) != 0);
      }
      /**
       * <code>optional string erasureCodingPolicy = 12;</code>
       * @return The erasureCodingPolicy.
       */
      public java.lang.String getErasureCodingPolicy() {
        java.lang.Object ref = erasureCodingPolicy_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            erasureCodingPolicy_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string erasureCodingPolicy = 12;</code>
       * @return The bytes for erasureCodingPolicy.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getErasureCodingPolicyBytes() {
        java.lang.Object ref = erasureCodingPolicy_;
        if (ref instanceof java.lang.String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          erasureCodingPolicy_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string erasureCodingPolicy = 12;</code>
       * @param value The erasureCodingPolicy to set.
       * @return This builder for chaining.
       */
      public Builder setErasureCodingPolicy(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        erasureCodingPolicy_ = value;
        bitField0_ |= 0x00000800;
        onChanged();
        return this;
      }
      /**
       * <code>optional string erasureCodingPolicy = 12;</code>
       * @return This builder for chaining.
       */
      public Builder clearErasureCodingPolicy() {
        erasureCodingPolicy_ = getDefaultInstance().getErasureCodingPolicy();
        bitField0_ = (bitField0_ & ~0x00000800);
        onChanged();
        return this;
      }
      /**
       * <code>optional string erasureCodingPolicy = 12;</code>
       * @param value The bytes for erasureCodingPolicy to set.
       * @return This builder for chaining.
       */
      public Builder setErasureCodingPolicyBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        erasureCodingPolicy_ = value;
        bitField0_ |= 0x00000800;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ContentSummaryProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ContentSummaryProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ContentSummaryProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ContentSummaryProto>() {
      @java.lang.Override
      public ContentSummaryProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ContentSummaryProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ContentSummaryProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
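
  // A minimal usage sketch for ContentSummaryProto (values are placeholders): set the six
  // required fields, build, and round-trip through the wire format. build() throws an
  // UninitializedMessageException if any required field is missing.
  //
  //   ContentSummaryProto summary = ContentSummaryProto.newBuilder()
  //       .setLength(1024L)
  //       .setFileCount(1L)
  //       .setDirectoryCount(2L)
  //       .setQuota(100L)
  //       .setSpaceConsumed(3072L)
  //       .setSpaceQuota(10000L)
  //       .build();
  //   byte[] bytes = summary.toByteArray();
  //   ContentSummaryProto parsed = ContentSummaryProto.parseFrom(bytes);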

  public interface QuotaUsageProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.QuotaUsageProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required uint64 fileAndDirectoryCount = 1;</code>
     * @return Whether the fileAndDirectoryCount field is set.
     */
    boolean hasFileAndDirectoryCount();
    /**
     * <code>required uint64 fileAndDirectoryCount = 1;</code>
     * @return The fileAndDirectoryCount.
     */
    long getFileAndDirectoryCount();

    /**
     * <code>required uint64 quota = 2;</code>
     * @return Whether the quota field is set.
     */
    boolean hasQuota();
    /**
     * <code>required uint64 quota = 2;</code>
     * @return The quota.
     */
    long getQuota();

    /**
     * <code>required uint64 spaceConsumed = 3;</code>
     * @return Whether the spaceConsumed field is set.
     */
    boolean hasSpaceConsumed();
    /**
     * <code>required uint64 spaceConsumed = 3;</code>
     * @return The spaceConsumed.
     */
    long getSpaceConsumed();

    /**
     * <code>required uint64 spaceQuota = 4;</code>
     * @return Whether the spaceQuota field is set.
     */
    boolean hasSpaceQuota();
    /**
     * <code>required uint64 spaceQuota = 4;</code>
     * @return The spaceQuota.
     */
    long getSpaceQuota();

    /**
     * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;</code>
     * @return Whether the typeQuotaInfos field is set.
     */
    boolean hasTypeQuotaInfos();
    /**
     * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;</code>
     * @return The typeQuotaInfos.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos();
    /**
     * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder();
  }
  /**
   * <pre>
   **
   * Summary of quota usage of a directory
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.QuotaUsageProto}
   */
  public static final class QuotaUsageProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.QuotaUsageProto)
      QuotaUsageProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use QuotaUsageProto.newBuilder() to construct.
    private QuotaUsageProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private QuotaUsageProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new QuotaUsageProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_QuotaUsageProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_QuotaUsageProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.Builder.class);
    }

    private int bitField0_;
    public static final int FILEANDDIRECTORYCOUNT_FIELD_NUMBER = 1;
    private long fileAndDirectoryCount_ = 0L;
    /**
     * <code>required uint64 fileAndDirectoryCount = 1;</code>
     * @return Whether the fileAndDirectoryCount field is set.
     */
    @java.lang.Override
    public boolean hasFileAndDirectoryCount() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required uint64 fileAndDirectoryCount = 1;</code>
     * @return The fileAndDirectoryCount.
     */
    @java.lang.Override
    public long getFileAndDirectoryCount() {
      return fileAndDirectoryCount_;
    }

    public static final int QUOTA_FIELD_NUMBER = 2;
    private long quota_ = 0L;
    /**
     * <code>required uint64 quota = 2;</code>
     * @return Whether the quota field is set.
     */
    @java.lang.Override
    public boolean hasQuota() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required uint64 quota = 2;</code>
     * @return The quota.
     */
    @java.lang.Override
    public long getQuota() {
      return quota_;
    }

    public static final int SPACECONSUMED_FIELD_NUMBER = 3;
    private long spaceConsumed_ = 0L;
    /**
     * <code>required uint64 spaceConsumed = 3;</code>
     * @return Whether the spaceConsumed field is set.
     */
    @java.lang.Override
    public boolean hasSpaceConsumed() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required uint64 spaceConsumed = 3;</code>
     * @return The spaceConsumed.
     */
    @java.lang.Override
    public long getSpaceConsumed() {
      return spaceConsumed_;
    }

    public static final int SPACEQUOTA_FIELD_NUMBER = 4;
    private long spaceQuota_ = 0L;
    /**
     * <code>required uint64 spaceQuota = 4;</code>
     * @return Whether the spaceQuota field is set.
     */
    @java.lang.Override
    public boolean hasSpaceQuota() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>required uint64 spaceQuota = 4;</code>
     * @return The spaceQuota.
     */
    @java.lang.Override
    public long getSpaceQuota() {
      return spaceQuota_;
    }

    public static final int TYPEQUOTAINFOS_FIELD_NUMBER = 5;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos_;
    /**
     * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;</code>
     * @return Whether the typeQuotaInfos field is set.
     */
    @java.lang.Override
    public boolean hasTypeQuotaInfos() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;</code>
     * @return The typeQuotaInfos.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos() {
      return typeQuotaInfos_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_;
    }
    /**
     * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder() {
      return typeQuotaInfos_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasFileAndDirectoryCount()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasQuota()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSpaceConsumed()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSpaceQuota()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (hasTypeQuotaInfos()) {
        if (!getTypeQuotaInfos().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt64(1, fileAndDirectoryCount_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(2, quota_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeUInt64(3, spaceConsumed_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeUInt64(4, spaceQuota_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeMessage(5, getTypeQuotaInfos());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(1, fileAndDirectoryCount_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(2, quota_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(3, spaceConsumed_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(4, spaceQuota_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(5, getTypeQuotaInfos());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto) obj;

      if (hasFileAndDirectoryCount() != other.hasFileAndDirectoryCount()) return false;
      if (hasFileAndDirectoryCount()) {
        if (getFileAndDirectoryCount()
            != other.getFileAndDirectoryCount()) return false;
      }
      if (hasQuota() != other.hasQuota()) return false;
      if (hasQuota()) {
        if (getQuota()
            != other.getQuota()) return false;
      }
      if (hasSpaceConsumed() != other.hasSpaceConsumed()) return false;
      if (hasSpaceConsumed()) {
        if (getSpaceConsumed()
            != other.getSpaceConsumed()) return false;
      }
      if (hasSpaceQuota() != other.hasSpaceQuota()) return false;
      if (hasSpaceQuota()) {
        if (getSpaceQuota()
            != other.getSpaceQuota()) return false;
      }
      if (hasTypeQuotaInfos() != other.hasTypeQuotaInfos()) return false;
      if (hasTypeQuotaInfos()) {
        if (!getTypeQuotaInfos()
            .equals(other.getTypeQuotaInfos())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasFileAndDirectoryCount()) {
        hash = (37 * hash) + FILEANDDIRECTORYCOUNT_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getFileAndDirectoryCount());
      }
      if (hasQuota()) {
        hash = (37 * hash) + QUOTA_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getQuota());
      }
      if (hasSpaceConsumed()) {
        hash = (37 * hash) + SPACECONSUMED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getSpaceConsumed());
      }
      if (hasSpaceQuota()) {
        hash = (37 * hash) + SPACEQUOTA_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getSpaceQuota());
      }
      if (hasTypeQuotaInfos()) {
        hash = (37 * hash) + TYPEQUOTAINFOS_FIELD_NUMBER;
        hash = (53 * hash) + getTypeQuotaInfos().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Summary of quota usage of a directory
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.QuotaUsageProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.QuotaUsageProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_QuotaUsageProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_QuotaUsageProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getTypeQuotaInfosFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        fileAndDirectoryCount_ = 0L;
        quota_ = 0L;
        spaceConsumed_ = 0L;
        spaceQuota_ = 0L;
        typeQuotaInfos_ = null;
        if (typeQuotaInfosBuilder_ != null) {
          typeQuotaInfosBuilder_.dispose();
          typeQuotaInfosBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_QuotaUsageProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.fileAndDirectoryCount_ = fileAndDirectoryCount_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.quota_ = quota_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.spaceConsumed_ = spaceConsumed_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.spaceQuota_ = spaceQuota_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.typeQuotaInfos_ = typeQuotaInfosBuilder_ == null
              ? typeQuotaInfos_
              : typeQuotaInfosBuilder_.build();
          to_bitField0_ |= 0x00000010;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.getDefaultInstance()) return this;
        if (other.hasFileAndDirectoryCount()) {
          setFileAndDirectoryCount(other.getFileAndDirectoryCount());
        }
        if (other.hasQuota()) {
          setQuota(other.getQuota());
        }
        if (other.hasSpaceConsumed()) {
          setSpaceConsumed(other.getSpaceConsumed());
        }
        if (other.hasSpaceQuota()) {
          setSpaceQuota(other.getSpaceQuota());
        }
        if (other.hasTypeQuotaInfos()) {
          mergeTypeQuotaInfos(other.getTypeQuotaInfos());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasFileAndDirectoryCount()) {
          return false;
        }
        if (!hasQuota()) {
          return false;
        }
        if (!hasSpaceConsumed()) {
          return false;
        }
        if (!hasSpaceQuota()) {
          return false;
        }
        if (hasTypeQuotaInfos()) {
          if (!getTypeQuotaInfos().isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                fileAndDirectoryCount_ = input.readUInt64();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 16: {
                quota_ = input.readUInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                spaceConsumed_ = input.readUInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 32: {
                spaceQuota_ = input.readUInt64();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
              case 42: {
                input.readMessage(
                    getTypeQuotaInfosFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000010;
                break;
              } // case 42
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private long fileAndDirectoryCount_ ;
      /**
       * <code>required uint64 fileAndDirectoryCount = 1;</code>
       * @return Whether the fileAndDirectoryCount field is set.
       */
      @java.lang.Override
      public boolean hasFileAndDirectoryCount() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required uint64 fileAndDirectoryCount = 1;</code>
       * @return The fileAndDirectoryCount.
       */
      @java.lang.Override
      public long getFileAndDirectoryCount() {
        return fileAndDirectoryCount_;
      }
      /**
       * <code>required uint64 fileAndDirectoryCount = 1;</code>
       * @param value The fileAndDirectoryCount to set.
       * @return This builder for chaining.
       */
      public Builder setFileAndDirectoryCount(long value) {

        fileAndDirectoryCount_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 fileAndDirectoryCount = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearFileAndDirectoryCount() {
        bitField0_ = (bitField0_ & ~0x00000001);
        fileAndDirectoryCount_ = 0L;
        onChanged();
        return this;
      }

      private long quota_ ;
      /**
       * <code>required uint64 quota = 2;</code>
       * @return Whether the quota field is set.
       */
      @java.lang.Override
      public boolean hasQuota() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required uint64 quota = 2;</code>
       * @return The quota.
       */
      @java.lang.Override
      public long getQuota() {
        return quota_;
      }
      /**
       * <code>required uint64 quota = 2;</code>
       * @param value The quota to set.
       * @return This builder for chaining.
       */
      public Builder setQuota(long value) {

        quota_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 quota = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearQuota() {
        bitField0_ = (bitField0_ & ~0x00000002);
        quota_ = 0L;
        onChanged();
        return this;
      }

      private long spaceConsumed_ ;
      /**
       * <code>required uint64 spaceConsumed = 3;</code>
       * @return Whether the spaceConsumed field is set.
       */
      @java.lang.Override
      public boolean hasSpaceConsumed() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required uint64 spaceConsumed = 3;</code>
       * @return The spaceConsumed.
       */
      @java.lang.Override
      public long getSpaceConsumed() {
        return spaceConsumed_;
      }
      /**
       * <code>required uint64 spaceConsumed = 3;</code>
       * @param value The spaceConsumed to set.
       * @return This builder for chaining.
       */
      public Builder setSpaceConsumed(long value) {

        spaceConsumed_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 spaceConsumed = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearSpaceConsumed() {
        bitField0_ = (bitField0_ & ~0x00000004);
        spaceConsumed_ = 0L;
        onChanged();
        return this;
      }

      private long spaceQuota_ ;
      /**
       * <code>required uint64 spaceQuota = 4;</code>
       * @return Whether the spaceQuota field is set.
       */
      @java.lang.Override
      public boolean hasSpaceQuota() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>required uint64 spaceQuota = 4;</code>
       * @return The spaceQuota.
       */
      @java.lang.Override
      public long getSpaceQuota() {
        return spaceQuota_;
      }
      /**
       * <code>required uint64 spaceQuota = 4;</code>
       * @param value The spaceQuota to set.
       * @return This builder for chaining.
       */
      public Builder setSpaceQuota(long value) {

        spaceQuota_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 spaceQuota = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearSpaceQuota() {
        bitField0_ = (bitField0_ & ~0x00000008);
        spaceQuota_ = 0L;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder> typeQuotaInfosBuilder_;
      /**
       * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;</code>
       * @return Whether the typeQuotaInfos field is set.
       */
      public boolean hasTypeQuotaInfos() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;</code>
       * @return The typeQuotaInfos.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getTypeQuotaInfos() {
        if (typeQuotaInfosBuilder_ == null) {
          return typeQuotaInfos_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_;
        } else {
          return typeQuotaInfosBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;</code>
       */
      public Builder setTypeQuotaInfos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto value) {
        if (typeQuotaInfosBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          typeQuotaInfos_ = value;
        } else {
          typeQuotaInfosBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;</code>
       */
      public Builder setTypeQuotaInfos(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder builderForValue) {
        if (typeQuotaInfosBuilder_ == null) {
          typeQuotaInfos_ = builderForValue.build();
        } else {
          typeQuotaInfosBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;</code>
       */
      public Builder mergeTypeQuotaInfos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto value) {
        if (typeQuotaInfosBuilder_ == null) {
          if (((bitField0_ & 0x00000010) != 0) &&
            typeQuotaInfos_ != null &&
            typeQuotaInfos_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance()) {
            getTypeQuotaInfosBuilder().mergeFrom(value);
          } else {
            typeQuotaInfos_ = value;
          }
        } else {
          typeQuotaInfosBuilder_.mergeFrom(value);
        }
        if (typeQuotaInfos_ != null) {
          bitField0_ |= 0x00000010;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;</code>
       */
      public Builder clearTypeQuotaInfos() {
        bitField0_ = (bitField0_ & ~0x00000010);
        typeQuotaInfos_ = null;
        if (typeQuotaInfosBuilder_ != null) {
          typeQuotaInfosBuilder_.dispose();
          typeQuotaInfosBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder getTypeQuotaInfosBuilder() {
        bitField0_ |= 0x00000010;
        onChanged();
        return getTypeQuotaInfosFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder getTypeQuotaInfosOrBuilder() {
        if (typeQuotaInfosBuilder_ != null) {
          return typeQuotaInfosBuilder_.getMessageOrBuilder();
        } else {
          return typeQuotaInfos_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance() : typeQuotaInfos_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeQuotaInfosProto typeQuotaInfos = 5;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder> 
          getTypeQuotaInfosFieldBuilder() {
        if (typeQuotaInfosBuilder_ == null) {
          typeQuotaInfosBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder>(
                  getTypeQuotaInfos(),
                  getParentForChildren(),
                  isClean());
          typeQuotaInfos_ = null;
        }
        return typeQuotaInfosBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.QuotaUsageProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.QuotaUsageProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<QuotaUsageProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<QuotaUsageProto>() {
      @java.lang.Override
      public QuotaUsageProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<QuotaUsageProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<QuotaUsageProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
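
  // Editor's note: illustrative sketch only, not generated output. It shows how a caller
  // would typically populate the Builder defined above. All four uint64 fields are
  // declared `required`, so build() throws the exception produced by
  // newUninitializedMessageException() if any of them is left unset; typeQuotaInfos
  // (field 5) is optional and may be omitted. The numeric values are placeholders.
  //
  //   org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto usage =
  //       org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto.newBuilder()
  //           .setFileAndDirectoryCount(42L)
  //           .setQuota(1000L)
  //           .setSpaceConsumed(512L)
  //           .setSpaceQuota(4096L)
  //           .build();
  //   byte[] wire = usage.toByteArray(); // serializes via writeTo()/getSerializedSize() above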

  public interface StorageTypeQuotaInfosProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.StorageTypeQuotaInfosProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto> 
        getTypeQuotaInfoList();
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getTypeQuotaInfo(int index);
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
     */
    int getTypeQuotaInfoCount();
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder> 
        getTypeQuotaInfoOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder getTypeQuotaInfoOrBuilder(
        int index);
  }
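
  // Editor's note: illustrative sketch of reading the repeated typeQuotaInfo field through
  // the accessors declared in this interface; `usage` is a hypothetical QuotaUsageProto
  // instance obtained elsewhere (for example from the builder sketch above).
  //
  //   org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto infos =
  //       usage.getTypeQuotaInfos();
  //   for (int i = 0; i < infos.getTypeQuotaInfoCount(); i++) {
  //     org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto info =
  //         infos.getTypeQuotaInfo(i); // one per-storage-type quota/usage entry
  //   }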
  /**
   * <pre>
   **
   * Storage type quota and usage information of a file or directory
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfosProto}
   */
  public static final class StorageTypeQuotaInfosProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.StorageTypeQuotaInfosProto)
      StorageTypeQuotaInfosProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use StorageTypeQuotaInfosProto.newBuilder() to construct.
    private StorageTypeQuotaInfosProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private StorageTypeQuotaInfosProto() {
      typeQuotaInfo_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new StorageTypeQuotaInfosProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder.class);
    }

    public static final int TYPEQUOTAINFO_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto> typeQuotaInfo_;
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto> getTypeQuotaInfoList() {
      return typeQuotaInfo_;
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder> 
        getTypeQuotaInfoOrBuilderList() {
      return typeQuotaInfo_;
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
     */
    @java.lang.Override
    public int getTypeQuotaInfoCount() {
      return typeQuotaInfo_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getTypeQuotaInfo(int index) {
      return typeQuotaInfo_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder getTypeQuotaInfoOrBuilder(
        int index) {
      return typeQuotaInfo_.get(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      for (int i = 0; i < getTypeQuotaInfoCount(); i++) {
        if (!getTypeQuotaInfo(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      for (int i = 0; i < typeQuotaInfo_.size(); i++) {
        output.writeMessage(1, typeQuotaInfo_.get(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      for (int i = 0; i < typeQuotaInfo_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, typeQuotaInfo_.get(i));
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto) obj;

      if (!getTypeQuotaInfoList()
          .equals(other.getTypeQuotaInfoList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getTypeQuotaInfoCount() > 0) {
        hash = (37 * hash) + TYPEQUOTAINFO_FIELD_NUMBER;
        hash = (53 * hash) + getTypeQuotaInfoList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Storage type quota and usage information of a file or directory
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfosProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.StorageTypeQuotaInfosProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        if (typeQuotaInfoBuilder_ == null) {
          typeQuotaInfo_ = java.util.Collections.emptyList();
        } else {
          typeQuotaInfo_ = null;
          typeQuotaInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto result) {
        if (typeQuotaInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0)) {
            typeQuotaInfo_ = java.util.Collections.unmodifiableList(typeQuotaInfo_);
            bitField0_ = (bitField0_ & ~0x00000001);
          }
          result.typeQuotaInfo_ = typeQuotaInfo_;
        } else {
          result.typeQuotaInfo_ = typeQuotaInfoBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto result) {
        int from_bitField0_ = bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto.getDefaultInstance()) return this;
        if (typeQuotaInfoBuilder_ == null) {
          if (!other.typeQuotaInfo_.isEmpty()) {
            if (typeQuotaInfo_.isEmpty()) {
              typeQuotaInfo_ = other.typeQuotaInfo_;
              bitField0_ = (bitField0_ & ~0x00000001);
            } else {
              ensureTypeQuotaInfoIsMutable();
              typeQuotaInfo_.addAll(other.typeQuotaInfo_);
            }
            onChanged();
          }
        } else {
          if (!other.typeQuotaInfo_.isEmpty()) {
            if (typeQuotaInfoBuilder_.isEmpty()) {
              typeQuotaInfoBuilder_.dispose();
              typeQuotaInfoBuilder_ = null;
              typeQuotaInfo_ = other.typeQuotaInfo_;
              bitField0_ = (bitField0_ & ~0x00000001);
              typeQuotaInfoBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getTypeQuotaInfoFieldBuilder() : null;
            } else {
              typeQuotaInfoBuilder_.addAllMessages(other.typeQuotaInfo_);
            }
          }
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        for (int i = 0; i < getTypeQuotaInfoCount(); i++) {
          if (!getTypeQuotaInfo(i).isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.PARSER,
                        extensionRegistry);
                if (typeQuotaInfoBuilder_ == null) {
                  ensureTypeQuotaInfoIsMutable();
                  typeQuotaInfo_.add(m);
                } else {
                  typeQuotaInfoBuilder_.addMessage(m);
                }
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto> typeQuotaInfo_ =
        java.util.Collections.emptyList();
      private void ensureTypeQuotaInfoIsMutable() {
        if (!((bitField0_ & 0x00000001) != 0)) {
          typeQuotaInfo_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto>(typeQuotaInfo_);
          bitField0_ |= 0x00000001;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder> typeQuotaInfoBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto> getTypeQuotaInfoList() {
        if (typeQuotaInfoBuilder_ == null) {
          return java.util.Collections.unmodifiableList(typeQuotaInfo_);
        } else {
          return typeQuotaInfoBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
       */
      public int getTypeQuotaInfoCount() {
        if (typeQuotaInfoBuilder_ == null) {
          return typeQuotaInfo_.size();
        } else {
          return typeQuotaInfoBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getTypeQuotaInfo(int index) {
        if (typeQuotaInfoBuilder_ == null) {
          return typeQuotaInfo_.get(index);
        } else {
          return typeQuotaInfoBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
       */
      public Builder setTypeQuotaInfo(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto value) {
        if (typeQuotaInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureTypeQuotaInfoIsMutable();
          typeQuotaInfo_.set(index, value);
          onChanged();
        } else {
          typeQuotaInfoBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
       */
      public Builder setTypeQuotaInfo(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder builderForValue) {
        if (typeQuotaInfoBuilder_ == null) {
          ensureTypeQuotaInfoIsMutable();
          typeQuotaInfo_.set(index, builderForValue.build());
          onChanged();
        } else {
          typeQuotaInfoBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
       */
      public Builder addTypeQuotaInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto value) {
        if (typeQuotaInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureTypeQuotaInfoIsMutable();
          typeQuotaInfo_.add(value);
          onChanged();
        } else {
          typeQuotaInfoBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
       */
      public Builder addTypeQuotaInfo(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto value) {
        if (typeQuotaInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureTypeQuotaInfoIsMutable();
          typeQuotaInfo_.add(index, value);
          onChanged();
        } else {
          typeQuotaInfoBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
       */
      public Builder addTypeQuotaInfo(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder builderForValue) {
        if (typeQuotaInfoBuilder_ == null) {
          ensureTypeQuotaInfoIsMutable();
          typeQuotaInfo_.add(builderForValue.build());
          onChanged();
        } else {
          typeQuotaInfoBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
       */
      public Builder addTypeQuotaInfo(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder builderForValue) {
        if (typeQuotaInfoBuilder_ == null) {
          ensureTypeQuotaInfoIsMutable();
          typeQuotaInfo_.add(index, builderForValue.build());
          onChanged();
        } else {
          typeQuotaInfoBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
       */
      public Builder addAllTypeQuotaInfo(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto> values) {
        if (typeQuotaInfoBuilder_ == null) {
          ensureTypeQuotaInfoIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, typeQuotaInfo_);
          onChanged();
        } else {
          typeQuotaInfoBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
       */
      public Builder clearTypeQuotaInfo() {
        if (typeQuotaInfoBuilder_ == null) {
          typeQuotaInfo_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
        } else {
          typeQuotaInfoBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
       */
      public Builder removeTypeQuotaInfo(int index) {
        if (typeQuotaInfoBuilder_ == null) {
          ensureTypeQuotaInfoIsMutable();
          typeQuotaInfo_.remove(index);
          onChanged();
        } else {
          typeQuotaInfoBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder getTypeQuotaInfoBuilder(
          int index) {
        return getTypeQuotaInfoFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder getTypeQuotaInfoOrBuilder(
          int index) {
        if (typeQuotaInfoBuilder_ == null) {
          return typeQuotaInfo_.get(index);
        } else {
          return typeQuotaInfoBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder> 
           getTypeQuotaInfoOrBuilderList() {
        if (typeQuotaInfoBuilder_ != null) {
          return typeQuotaInfoBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(typeQuotaInfo_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder addTypeQuotaInfoBuilder() {
        return getTypeQuotaInfoFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder addTypeQuotaInfoBuilder(
          int index) {
        return getTypeQuotaInfoFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeQuotaInfoProto typeQuotaInfo = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder> 
           getTypeQuotaInfoBuilderList() {
        return getTypeQuotaInfoFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder> 
          getTypeQuotaInfoFieldBuilder() {
        if (typeQuotaInfoBuilder_ == null) {
          typeQuotaInfoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder>(
                  typeQuotaInfo_,
                  ((bitField0_ & 0x00000001) != 0),
                  getParentForChildren(),
                  isClean());
          typeQuotaInfo_ = null;
        }
        return typeQuotaInfoBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageTypeQuotaInfosProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageTypeQuotaInfosProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<StorageTypeQuotaInfosProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<StorageTypeQuotaInfosProto>() {
      @java.lang.Override
      public StorageTypeQuotaInfosProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<StorageTypeQuotaInfosProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<StorageTypeQuotaInfosProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
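  // ---------------------------------------------------------------------------
  // Illustrative usage sketch (not emitted by protoc): a hypothetical helper
  // showing how the StorageTypeQuotaInfosProto builder above might be used to
  // populate its repeated typeQuotaInfo field. The method name and the quota
  // values are assumptions made purely for illustration.
  private static StorageTypeQuotaInfosProto exampleStorageTypeQuotaInfos() {
    return StorageTypeQuotaInfosProto.newBuilder()
        // addTypeQuotaInfo(Builder) builds the nested message internally.
        .addTypeQuotaInfo(StorageTypeQuotaInfoProto.newBuilder()
            .setType(StorageTypeProto.SSD) // optional; defaults to DISK when unset
            .setQuota(1024L)               // required uint64 quota
            .setConsumed(512L))            // required uint64 consumed
        .build();
  }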

  public interface StorageTypeQuotaInfoProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.StorageTypeQuotaInfoProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK];</code>
     * @return Whether the type field is set.
     */
    boolean hasType();
    /**
     * <code>optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK];</code>
     * @return The type.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getType();

    /**
     * <code>required uint64 quota = 2;</code>
     * @return Whether the quota field is set.
     */
    boolean hasQuota();
    /**
     * <code>required uint64 quota = 2;</code>
     * @return The quota.
     */
    long getQuota();

    /**
     * <code>required uint64 consumed = 3;</code>
     * @return Whether the consumed field is set.
     */
    boolean hasConsumed();
    /**
     * <code>required uint64 consumed = 3;</code>
     * @return The consumed.
     */
    long getConsumed();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfoProto}
   */
  public static final class StorageTypeQuotaInfoProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.StorageTypeQuotaInfoProto)
      StorageTypeQuotaInfoProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use StorageTypeQuotaInfoProto.newBuilder() to construct.
    private StorageTypeQuotaInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private StorageTypeQuotaInfoProto() {
      type_ = 1;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new StorageTypeQuotaInfoProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder.class);
    }

    private int bitField0_;
    public static final int TYPE_FIELD_NUMBER = 1;
    private int type_ = 1;
    /**
     * <code>optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK];</code>
     * @return Whether the type field is set.
     */
    @java.lang.Override public boolean hasType() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK];</code>
     * @return The type.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getType() {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(type_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
    }

    public static final int QUOTA_FIELD_NUMBER = 2;
    private long quota_ = 0L;
    /**
     * <code>required uint64 quota = 2;</code>
     * @return Whether the quota field is set.
     */
    @java.lang.Override
    public boolean hasQuota() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required uint64 quota = 2;</code>
     * @return The quota.
     */
    @java.lang.Override
    public long getQuota() {
      return quota_;
    }

    public static final int CONSUMED_FIELD_NUMBER = 3;
    private long consumed_ = 0L;
    /**
     * <code>required uint64 consumed = 3;</code>
     * @return Whether the consumed field is set.
     */
    @java.lang.Override
    public boolean hasConsumed() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required uint64 consumed = 3;</code>
     * @return The consumed.
     */
    @java.lang.Override
    public long getConsumed() {
      return consumed_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasQuota()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasConsumed()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeEnum(1, type_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(2, quota_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeUInt64(3, consumed_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(1, type_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(2, quota_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(3, consumed_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto) obj;

      if (hasType() != other.hasType()) return false;
      if (hasType()) {
        if (type_ != other.type_) return false;
      }
      if (hasQuota() != other.hasQuota()) return false;
      if (hasQuota()) {
        if (getQuota()
            != other.getQuota()) return false;
      }
      if (hasConsumed() != other.hasConsumed()) return false;
      if (hasConsumed()) {
        if (getConsumed()
            != other.getConsumed()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasType()) {
        hash = (37 * hash) + TYPE_FIELD_NUMBER;
        hash = (53 * hash) + type_;
      }
      if (hasQuota()) {
        hash = (37 * hash) + QUOTA_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getQuota());
      }
      if (hasConsumed()) {
        hash = (37 * hash) + CONSUMED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getConsumed());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.StorageTypeQuotaInfoProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.StorageTypeQuotaInfoProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        type_ = 1;
        quota_ = 0L;
        consumed_ = 0L;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.type_ = type_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.quota_ = quota_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.consumed_ = consumed_;
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto.getDefaultInstance()) return this;
        if (other.hasType()) {
          setType(other.getType());
        }
        if (other.hasQuota()) {
          setQuota(other.getQuota());
        }
        if (other.hasConsumed()) {
          setConsumed(other.getConsumed());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasQuota()) {
          return false;
        }
        if (!hasConsumed()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(1, tmpRaw);
                } else {
                  type_ = tmpRaw;
                  bitField0_ |= 0x00000001;
                }
                break;
              } // case 8
              case 16: {
                quota_ = input.readUInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                consumed_ = input.readUInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int type_ = 1;
      /**
       * <code>optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK];</code>
       * @return Whether the type field is set.
       */
      @java.lang.Override public boolean hasType() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK];</code>
       * @return The type.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getType() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(type_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK];</code>
       * @param value The type to set.
       * @return This builder for chaining.
       */
      public Builder setType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        type_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeProto type = 1 [default = DISK];</code>
       * @return This builder for chaining.
       */
      public Builder clearType() {
        bitField0_ = (bitField0_ & ~0x00000001);
        type_ = 1;
        onChanged();
        return this;
      }

      private long quota_ ;
      /**
       * <code>required uint64 quota = 2;</code>
       * @return Whether the quota field is set.
       */
      @java.lang.Override
      public boolean hasQuota() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required uint64 quota = 2;</code>
       * @return The quota.
       */
      @java.lang.Override
      public long getQuota() {
        return quota_;
      }
      /**
       * <code>required uint64 quota = 2;</code>
       * @param value The quota to set.
       * @return This builder for chaining.
       */
      public Builder setQuota(long value) {

        quota_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 quota = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearQuota() {
        bitField0_ = (bitField0_ & ~0x00000002);
        quota_ = 0L;
        onChanged();
        return this;
      }

      private long consumed_ ;
      /**
       * <code>required uint64 consumed = 3;</code>
       * @return Whether the consumed field is set.
       */
      @java.lang.Override
      public boolean hasConsumed() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required uint64 consumed = 3;</code>
       * @return The consumed.
       */
      @java.lang.Override
      public long getConsumed() {
        return consumed_;
      }
      /**
       * <code>required uint64 consumed = 3;</code>
       * @param value The consumed to set.
       * @return This builder for chaining.
       */
      public Builder setConsumed(long value) {

        consumed_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 consumed = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearConsumed() {
        bitField0_ = (bitField0_ & ~0x00000004);
        consumed_ = 0L;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageTypeQuotaInfoProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageTypeQuotaInfoProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<StorageTypeQuotaInfoProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<StorageTypeQuotaInfoProto>() {
      @java.lang.Override
      public StorageTypeQuotaInfoProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<StorageTypeQuotaInfoProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<StorageTypeQuotaInfoProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
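  // ---------------------------------------------------------------------------
  // Illustrative usage sketch (not emitted by protoc): a hypothetical round-trip
  // through the StorageTypeQuotaInfoProto API defined above. build() throws if
  // the required quota/consumed fields are unset, so both are set here.
  private static StorageTypeQuotaInfoProto exampleStorageTypeQuotaInfoRoundTrip()
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    StorageTypeQuotaInfoProto info = StorageTypeQuotaInfoProto.newBuilder()
        .setType(StorageTypeProto.ARCHIVE) // optional enum, default DISK
        .setQuota(10L * 1024L * 1024L)     // required
        .setConsumed(3L * 1024L * 1024L)   // required
        .build();
    // Serialize and parse back; parseFrom(byte[]) is defined above.
    byte[] bytes = info.toByteArray();
    return StorageTypeQuotaInfoProto.parseFrom(bytes);
  }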

  public interface CorruptFileBlocksProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.CorruptFileBlocksProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>repeated string files = 1;</code>
     * @return A list containing the files.
     */
    java.util.List<java.lang.String>
        getFilesList();
    /**
     * <code>repeated string files = 1;</code>
     * @return The count of files.
     */
    int getFilesCount();
    /**
     * <code>repeated string files = 1;</code>
     * @param index The index of the element to return.
     * @return The files at the given index.
     */
    java.lang.String getFiles(int index);
    /**
     * <code>repeated string files = 1;</code>
     * @param index The index of the value to return.
     * @return The bytes of the files at the given index.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getFilesBytes(int index);

    /**
     * <code>required string cookie = 2;</code>
     * @return Whether the cookie field is set.
     */
    boolean hasCookie();
    /**
     * <code>required string cookie = 2;</code>
     * @return The cookie.
     */
    java.lang.String getCookie();
    /**
     * <code>required string cookie = 2;</code>
     * @return The bytes for cookie.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getCookieBytes();
  }
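  // ---------------------------------------------------------------------------
  // Illustrative usage sketch (not emitted by protoc): shows how a
  // CorruptFileBlocksProto response might be assembled and read. addFiles() and
  // setCookie() are assumed to be the standard protoc-generated setters for the
  // repeated files and required cookie fields; the values are placeholders.
  private static CorruptFileBlocksProto exampleCorruptFileBlocks() {
    CorruptFileBlocksProto blocks = CorruptFileBlocksProto.newBuilder()
        .addFiles("/user/example/corrupt-file")
        .setCookie("next-batch-cookie") // cookie for the next iterative listing call
        .build();
    // Read side mirrors the CorruptFileBlocksProtoOrBuilder accessors below.
    for (java.lang.String f : blocks.getFilesList()) {
      java.lang.System.out.println(f + " (cookie=" + blocks.getCookie() + ")");
    }
    return blocks;
  }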
  /**
   * <pre>
   **
   * Contains a list of paths corresponding to corrupt files and a cookie
   * used for iterative calls to NameNode.listCorruptFileBlocks.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.CorruptFileBlocksProto}
   */
  public static final class CorruptFileBlocksProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.CorruptFileBlocksProto)
      CorruptFileBlocksProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use CorruptFileBlocksProto.newBuilder() to construct.
    private CorruptFileBlocksProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private CorruptFileBlocksProto() {
      files_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
      cookie_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new CorruptFileBlocksProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder.class);
    }

    private int bitField0_;
    public static final int FILES_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList files_ =
        org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
    /**
     * <code>repeated string files = 1;</code>
     * @return A list containing the files.
     */
    public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
        getFilesList() {
      return files_;
    }
    /**
     * <code>repeated string files = 1;</code>
     * @return The count of files.
     */
    public int getFilesCount() {
      return files_.size();
    }
    /**
     * <code>repeated string files = 1;</code>
     * @param index The index of the element to return.
     * @return The files at the given index.
     */
    public java.lang.String getFiles(int index) {
      return files_.get(index);
    }
    /**
     * <code>repeated string files = 1;</code>
     * @param index The index of the value to return.
     * @return The bytes of the files at the given index.
     */
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getFilesBytes(int index) {
      return files_.getByteString(index);
    }

    public static final int COOKIE_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object cookie_ = "";
    /**
     * <code>required string cookie = 2;</code>
     * @return Whether the cookie field is set.
     */
    @java.lang.Override
    public boolean hasCookie() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required string cookie = 2;</code>
     * @return The cookie.
     */
    @java.lang.Override
    public java.lang.String getCookie() {
      java.lang.Object ref = cookie_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          cookie_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string cookie = 2;</code>
     * @return The bytes for cookie.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getCookieBytes() {
      java.lang.Object ref = cookie_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        cookie_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasCookie()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      for (int i = 0; i < files_.size(); i++) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, files_.getRaw(i));
      }
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, cookie_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      {
        int dataSize = 0;
        for (int i = 0; i < files_.size(); i++) {
          dataSize += computeStringSizeNoTag(files_.getRaw(i));
        }
        size += dataSize;
        size += 1 * getFilesList().size();
      }
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, cookie_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) obj;

      if (!getFilesList()
          .equals(other.getFilesList())) return false;
      if (hasCookie() != other.hasCookie()) return false;
      if (hasCookie()) {
        if (!getCookie()
            .equals(other.getCookie())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getFilesCount() > 0) {
        hash = (37 * hash) + FILES_FIELD_NUMBER;
        hash = (53 * hash) + getFilesList().hashCode();
      }
      if (hasCookie()) {
        hash = (37 * hash) + COOKIE_FIELD_NUMBER;
        hash = (53 * hash) + getCookie().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
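    // Usage note (not generated): the parseFrom overloads above read exactly one
    // CorruptFileBlocksProto from the given bytes or stream, while the
    // parseDelimitedFrom variants expect a varint length prefix and therefore pair
    // with Message#writeDelimitedTo when several messages share one stream.
    // A minimal sketch, assuming an InputStream `in` produced by writeDelimitedTo
    // and a hypothetical process() handler:
    //
    //   CorruptFileBlocksProto msg;
    //   while ((msg = CorruptFileBlocksProto.parseDelimitedFrom(in)) != null) {
    //     process(msg);
    //   }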

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Contains a list of paths corresponding to corrupt files and a cookie
     * used for iterative calls to NameNode.listCorruptFileBlocks.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.CorruptFileBlocksProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.CorruptFileBlocksProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        files_ =
            org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        cookie_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto result) {
        int from_bitField0_ = bitField0_;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          files_.makeImmutable();
          result.files_ = files_;
        }
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.cookie_ = cookie_;
          to_bitField0_ |= 0x00000001;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto.getDefaultInstance()) return this;
        if (!other.files_.isEmpty()) {
          if (files_.isEmpty()) {
            files_ = other.files_;
            bitField0_ |= 0x00000001;
          } else {
            ensureFilesIsMutable();
            files_.addAll(other.files_);
          }
          onChanged();
        }
        if (other.hasCookie()) {
          cookie_ = other.cookie_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasCookie()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
                ensureFilesIsMutable();
                files_.add(bs);
                break;
              } // case 10
              case 18: {
                cookie_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList files_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
      private void ensureFilesIsMutable() {
        if (!files_.isModifiable()) {
          files_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(files_);
        }
        bitField0_ |= 0x00000001;
      }
      /**
       * <code>repeated string files = 1;</code>
       * @return A list containing the files.
       */
      public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
          getFilesList() {
        files_.makeImmutable();
        return files_;
      }
      /**
       * <code>repeated string files = 1;</code>
       * @return The count of files.
       */
      public int getFilesCount() {
        return files_.size();
      }
      /**
       * <code>repeated string files = 1;</code>
       * @param index The index of the element to return.
       * @return The files at the given index.
       */
      public java.lang.String getFiles(int index) {
        return files_.get(index);
      }
      /**
       * <code>repeated string files = 1;</code>
       * @param index The index of the value to return.
       * @return The bytes of the files at the given index.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getFilesBytes(int index) {
        return files_.getByteString(index);
      }
      /**
       * <code>repeated string files = 1;</code>
       * @param index The index to set the value at.
       * @param value The files to set.
       * @return This builder for chaining.
       */
      public Builder setFiles(
          int index, java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureFilesIsMutable();
        files_.set(index, value);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string files = 1;</code>
       * @param value The files to add.
       * @return This builder for chaining.
       */
      public Builder addFiles(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureFilesIsMutable();
        files_.add(value);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string files = 1;</code>
       * @param values The files to add.
       * @return This builder for chaining.
       */
      public Builder addAllFiles(
          java.lang.Iterable<java.lang.String> values) {
        ensureFilesIsMutable();
        org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
            values, files_);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string files = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearFiles() {
        files_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>repeated string files = 1;</code>
       * @param value The bytes of the files to add.
       * @return This builder for chaining.
       */
      public Builder addFilesBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        ensureFilesIsMutable();
        files_.add(value);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private java.lang.Object cookie_ = "";
      /**
       * <code>required string cookie = 2;</code>
       * @return Whether the cookie field is set.
       */
      public boolean hasCookie() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required string cookie = 2;</code>
       * @return The cookie.
       */
      public java.lang.String getCookie() {
        java.lang.Object ref = cookie_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            cookie_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string cookie = 2;</code>
       * @return The bytes for cookie.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getCookieBytes() {
        java.lang.Object ref = cookie_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          cookie_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string cookie = 2;</code>
       * @param value The cookie to set.
       * @return This builder for chaining.
       */
      public Builder setCookie(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        cookie_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required string cookie = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearCookie() {
        cookie_ = getDefaultInstance().getCookie();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <code>required string cookie = 2;</code>
       * @param value The bytes for cookie to set.
       * @return This builder for chaining.
       */
      public Builder setCookieBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        cookie_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CorruptFileBlocksProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.CorruptFileBlocksProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<CorruptFileBlocksProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<CorruptFileBlocksProto>() {
      @java.lang.Override
      public CorruptFileBlocksProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<CorruptFileBlocksProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<CorruptFileBlocksProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
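  // Illustrative only (not part of the generated API surface): a minimal sketch of
  // building and round-tripping a CorruptFileBlocksProto. The required `cookie`
  // field must be set before build(), otherwise an uninitialized-message exception
  // is thrown; the file paths shown are hypothetical.
  //
  //   CorruptFileBlocksProto proto = CorruptFileBlocksProto.newBuilder()
  //       .addFiles("/data/part-00000")
  //       .addFiles("/data/part-00001")
  //       .setCookie("42")
  //       .build();
  //   byte[] wire = proto.toByteArray();
  //   CorruptFileBlocksProto parsed = CorruptFileBlocksProto.parseFrom(wire);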

  public interface StorageTypesProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.StorageTypesProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;</code>
     * @return A list containing the storageTypes.
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList();
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;</code>
     * @return The count of storageTypes.
     */
    int getStorageTypesCount();
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;</code>
     * @param index The index of the element to return.
     * @return The storageTypes at the given index.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index);
  }
  /**
   * <pre>
   **
   * A list of storage types. 
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.StorageTypesProto}
   */
  public static final class StorageTypesProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.StorageTypesProto)
      StorageTypesProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use StorageTypesProto.newBuilder() to construct.
    private StorageTypesProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private StorageTypesProto() {
      storageTypes_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new StorageTypesProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder.class);
    }

    public static final int STORAGETYPES_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private java.util.List<java.lang.Integer> storageTypes_;
    private static final org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
        java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> storageTypes_converter_ =
            new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
                java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>() {
              public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto convert(java.lang.Integer from) {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(from);
                return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
              }
            };
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;</code>
     * @return A list containing the storageTypes.
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList() {
      return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
          java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(storageTypes_, storageTypes_converter_);
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;</code>
     * @return The count of storageTypes.
     */
    @java.lang.Override
    public int getStorageTypesCount() {
      return storageTypes_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;</code>
     * @param index The index of the element to return.
     * @return The storageTypes at the given index.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) {
      return storageTypes_converter_.convert(storageTypes_.get(index));
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      for (int i = 0; i < storageTypes_.size(); i++) {
        output.writeEnum(1, storageTypes_.get(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      {
        int dataSize = 0;
        for (int i = 0; i < storageTypes_.size(); i++) {
          dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeEnumSizeNoTag(storageTypes_.get(i));
        }
        size += dataSize;
        size += 1 * storageTypes_.size();
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto) obj;

      if (!storageTypes_.equals(other.storageTypes_)) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getStorageTypesCount() > 0) {
        hash = (37 * hash) + STORAGETYPES_FIELD_NUMBER;
        hash = (53 * hash) + storageTypes_.hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * A list of storage types. 
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.StorageTypesProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.StorageTypesProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        storageTypes_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageTypesProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto result) {
        if (((bitField0_ & 0x00000001) != 0)) {
          storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_);
          bitField0_ = (bitField0_ & ~0x00000001);
        }
        result.storageTypes_ = storageTypes_;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto result) {
        int from_bitField0_ = bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) return this;
        if (!other.storageTypes_.isEmpty()) {
          if (storageTypes_.isEmpty()) {
            storageTypes_ = other.storageTypes_;
            bitField0_ = (bitField0_ & ~0x00000001);
          } else {
            ensureStorageTypesIsMutable();
            storageTypes_.addAll(other.storageTypes_);
          }
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(1, tmpRaw);
                } else {
                  ensureStorageTypesIsMutable();
                  storageTypes_.add(tmpRaw);
                }
                break;
              } // case 8
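              // Tag 10 (length-delimited) below covers the packed encoding of the
              // same repeated enum field; the parser accepts both the unpacked form
              // handled in case 8 and the packed form, so data written by either
              // style of writer merges correctly.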
              case 10: {
                int length = input.readRawVarint32();
                int oldLimit = input.pushLimit(length);
                while(input.getBytesUntilLimit() > 0) {
                  int tmpRaw = input.readEnum();
                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue =
                      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw);
                  if (tmpValue == null) {
                    mergeUnknownVarintField(1, tmpRaw);
                  } else {
                    ensureStorageTypesIsMutable();
                    storageTypes_.add(tmpRaw);
                  }
                }
                input.popLimit(oldLimit);
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.util.List<java.lang.Integer> storageTypes_ =
        java.util.Collections.emptyList();
      private void ensureStorageTypesIsMutable() {
        if (!((bitField0_ & 0x00000001) != 0)) {
          storageTypes_ = new java.util.ArrayList<java.lang.Integer>(storageTypes_);
          bitField0_ |= 0x00000001;
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;</code>
       * @return A list containing the storageTypes.
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList() {
        return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
            java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(storageTypes_, storageTypes_converter_);
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;</code>
       * @return The count of storageTypes.
       */
      public int getStorageTypesCount() {
        return storageTypes_.size();
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;</code>
       * @param index The index of the element to return.
       * @return The storageTypes at the given index.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) {
        return storageTypes_converter_.convert(storageTypes_.get(index));
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;</code>
       * @param index The index to set the value at.
       * @param value The storageTypes to set.
       * @return This builder for chaining.
       */
      public Builder setStorageTypes(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureStorageTypesIsMutable();
        storageTypes_.set(index, value.getNumber());
        onChanged();
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;</code>
       * @param value The storageTypes to add.
       * @return This builder for chaining.
       */
      public Builder addStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureStorageTypesIsMutable();
        storageTypes_.add(value.getNumber());
        onChanged();
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;</code>
       * @param values The storageTypes to add.
       * @return This builder for chaining.
       */
      public Builder addAllStorageTypes(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> values) {
        ensureStorageTypesIsMutable();
        for (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value : values) {
          storageTypes_.add(value.getNumber());
        }
        onChanged();
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearStorageTypes() {
        storageTypes_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageTypesProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageTypesProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<StorageTypesProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<StorageTypesProto>() {
      @java.lang.Override
      public StorageTypesProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<StorageTypesProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<StorageTypesProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
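  // Illustrative only (not generated): StorageTypesProto is an ordered list of
  // StorageTypeProto values, stored internally as raw enum numbers and re-wrapped
  // as enum constants by the accessors. A small sketch:
  //
  //   StorageTypesProto types = StorageTypesProto.newBuilder()
  //       .addStorageTypes(StorageTypeProto.SSD)
  //       .addStorageTypes(StorageTypeProto.DISK)
  //       .build();
  //   for (StorageTypeProto t : types.getStorageTypesList()) {
  //     // unknown numbers read back as DISK via the converter's default
  //   }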

  public interface BlockStoragePolicyProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.BlockStoragePolicyProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required uint32 policyId = 1;</code>
     * @return Whether the policyId field is set.
     */
    boolean hasPolicyId();
    /**
     * <code>required uint32 policyId = 1;</code>
     * @return The policyId.
     */
    int getPolicyId();

    /**
     * <code>required string name = 2;</code>
     * @return Whether the name field is set.
     */
    boolean hasName();
    /**
     * <code>required string name = 2;</code>
     * @return The name.
     */
    java.lang.String getName();
    /**
     * <code>required string name = 2;</code>
     * @return The bytes for name.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getNameBytes();

    /**
     * <pre>
     * a list of storage types for storing the block replicas when creating a
     * block.
     * </pre>
     *
     * <code>required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;</code>
     * @return Whether the creationPolicy field is set.
     */
    boolean hasCreationPolicy();
    /**
     * <pre>
     * a list of storage types for storing the block replicas when creating a
     * block.
     * </pre>
     *
     * <code>required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;</code>
     * @return The creationPolicy.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationPolicy();
    /**
     * <pre>
     * a list of storage types for storing the block replicas when creating a
     * block.
     * </pre>
     *
     * <code>required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationPolicyOrBuilder();

    /**
     * <pre>
     * A list of storage types for creation fallback storage.
     * </pre>
     *
     * <code>optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;</code>
     * @return Whether the creationFallbackPolicy field is set.
     */
    boolean hasCreationFallbackPolicy();
    /**
     * <pre>
     * A list of storage types for creation fallback storage.
     * </pre>
     *
     * <code>optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;</code>
     * @return The creationFallbackPolicy.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationFallbackPolicy();
    /**
     * <pre>
     * A list of storage types for creation fallback storage.
     * </pre>
     *
     * <code>optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationFallbackPolicyOrBuilder();

    /**
     * <code>optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;</code>
     * @return Whether the replicationFallbackPolicy field is set.
     */
    boolean hasReplicationFallbackPolicy();
    /**
     * <code>optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;</code>
     * @return The replicationFallbackPolicy.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getReplicationFallbackPolicy();
    /**
     * <code>optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getReplicationFallbackPolicyOrBuilder();
  }
  /**
   * <pre>
   **
   * Block replica storage policy.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.BlockStoragePolicyProto}
   */
  public static final class BlockStoragePolicyProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.BlockStoragePolicyProto)
      BlockStoragePolicyProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use BlockStoragePolicyProto.newBuilder() to construct.
    private BlockStoragePolicyProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private BlockStoragePolicyProto() {
      name_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new BlockStoragePolicyProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder.class);
    }

    private int bitField0_;
    public static final int POLICYID_FIELD_NUMBER = 1;
    private int policyId_ = 0;
    /**
     * <code>required uint32 policyId = 1;</code>
     * @return Whether the policyId field is set.
     */
    @java.lang.Override
    public boolean hasPolicyId() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required uint32 policyId = 1;</code>
     * @return The policyId.
     */
    @java.lang.Override
    public int getPolicyId() {
      return policyId_;
    }

    public static final int NAME_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object name_ = "";
    /**
     * <code>required string name = 2;</code>
     * @return Whether the name field is set.
     */
    @java.lang.Override
    public boolean hasName() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required string name = 2;</code>
     * @return The name.
     */
    @java.lang.Override
    public java.lang.String getName() {
      java.lang.Object ref = name_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          name_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string name = 2;</code>
     * @return The bytes for name.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getNameBytes() {
      java.lang.Object ref = name_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        name_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int CREATIONPOLICY_FIELD_NUMBER = 3;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationPolicy_;
    /**
     * <pre>
     * a list of storage types for storing the block replicas when creating a
     * block.
     * </pre>
     *
     * <code>required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;</code>
     * @return Whether the creationPolicy field is set.
     */
    @java.lang.Override
    public boolean hasCreationPolicy() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <pre>
     * a list of storage types for storing the block replicas when creating a
     * block.
     * </pre>
     *
     * <code>required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;</code>
     * @return The creationPolicy.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationPolicy() {
      return creationPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationPolicy_;
    }
    /**
     * <pre>
     * a list of storage types for storing the block replicas when creating a
     * block.
     * </pre>
     *
     * <code>required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationPolicyOrBuilder() {
      return creationPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationPolicy_;
    }

    public static final int CREATIONFALLBACKPOLICY_FIELD_NUMBER = 4;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationFallbackPolicy_;
    /**
     * <pre>
     * A list of storage types for creation fallback storage.
     * </pre>
     *
     * <code>optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;</code>
     * @return Whether the creationFallbackPolicy field is set.
     */
    @java.lang.Override
    public boolean hasCreationFallbackPolicy() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <pre>
     * A list of storage types for creation fallback storage.
     * </pre>
     *
     * <code>optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;</code>
     * @return The creationFallbackPolicy.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationFallbackPolicy() {
      return creationFallbackPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationFallbackPolicy_;
    }
    /**
     * <pre>
     * A list of storage types for creation fallback storage.
     * </pre>
     *
     * <code>optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationFallbackPolicyOrBuilder() {
      return creationFallbackPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationFallbackPolicy_;
    }

    public static final int REPLICATIONFALLBACKPOLICY_FIELD_NUMBER = 5;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto replicationFallbackPolicy_;
    /**
     * <code>optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;</code>
     * @return Whether the replicationFallbackPolicy field is set.
     */
    @java.lang.Override
    public boolean hasReplicationFallbackPolicy() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;</code>
     * @return The replicationFallbackPolicy.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getReplicationFallbackPolicy() {
      return replicationFallbackPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : replicationFallbackPolicy_;
    }
    /**
     * <code>optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getReplicationFallbackPolicyOrBuilder() {
      return replicationFallbackPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : replicationFallbackPolicy_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasPolicyId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasName()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasCreationPolicy()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt32(1, policyId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, name_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeMessage(3, getCreationPolicy());
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeMessage(4, getCreationFallbackPolicy());
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeMessage(5, getReplicationFallbackPolicy());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(1, policyId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, name_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(3, getCreationPolicy());
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(4, getCreationFallbackPolicy());
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(5, getReplicationFallbackPolicy());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto) obj;

      if (hasPolicyId() != other.hasPolicyId()) return false;
      if (hasPolicyId()) {
        if (getPolicyId()
            != other.getPolicyId()) return false;
      }
      if (hasName() != other.hasName()) return false;
      if (hasName()) {
        if (!getName()
            .equals(other.getName())) return false;
      }
      if (hasCreationPolicy() != other.hasCreationPolicy()) return false;
      if (hasCreationPolicy()) {
        if (!getCreationPolicy()
            .equals(other.getCreationPolicy())) return false;
      }
      if (hasCreationFallbackPolicy() != other.hasCreationFallbackPolicy()) return false;
      if (hasCreationFallbackPolicy()) {
        if (!getCreationFallbackPolicy()
            .equals(other.getCreationFallbackPolicy())) return false;
      }
      if (hasReplicationFallbackPolicy() != other.hasReplicationFallbackPolicy()) return false;
      if (hasReplicationFallbackPolicy()) {
        if (!getReplicationFallbackPolicy()
            .equals(other.getReplicationFallbackPolicy())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasPolicyId()) {
        hash = (37 * hash) + POLICYID_FIELD_NUMBER;
        hash = (53 * hash) + getPolicyId();
      }
      if (hasName()) {
        hash = (37 * hash) + NAME_FIELD_NUMBER;
        hash = (53 * hash) + getName().hashCode();
      }
      if (hasCreationPolicy()) {
        hash = (37 * hash) + CREATIONPOLICY_FIELD_NUMBER;
        hash = (53 * hash) + getCreationPolicy().hashCode();
      }
      if (hasCreationFallbackPolicy()) {
        hash = (37 * hash) + CREATIONFALLBACKPOLICY_FIELD_NUMBER;
        hash = (53 * hash) + getCreationFallbackPolicy().hashCode();
      }
      if (hasReplicationFallbackPolicy()) {
        hash = (37 * hash) + REPLICATIONFALLBACKPOLICY_FIELD_NUMBER;
        hash = (53 * hash) + getReplicationFallbackPolicy().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
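
    // Parsing sketch (illustrative): any of the parseFrom overloads above reconstructs a
    // BlockStoragePolicyProto from its serialized form; a simple round trip looks like
    //
    //   byte[] bytes = policy.toByteArray();
    //   BlockStoragePolicyProto copy = BlockStoragePolicyProto.parseFrom(bytes);
    //   assert copy.getPolicyId() == policy.getPolicyId();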

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Block replica storage policy.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.BlockStoragePolicyProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.BlockStoragePolicyProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getCreationPolicyFieldBuilder();
          getCreationFallbackPolicyFieldBuilder();
          getReplicationFallbackPolicyFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        policyId_ = 0;
        name_ = "";
        creationPolicy_ = null;
        if (creationPolicyBuilder_ != null) {
          creationPolicyBuilder_.dispose();
          creationPolicyBuilder_ = null;
        }
        creationFallbackPolicy_ = null;
        if (creationFallbackPolicyBuilder_ != null) {
          creationFallbackPolicyBuilder_.dispose();
          creationFallbackPolicyBuilder_ = null;
        }
        replicationFallbackPolicy_ = null;
        if (replicationFallbackPolicyBuilder_ != null) {
          replicationFallbackPolicyBuilder_.dispose();
          replicationFallbackPolicyBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.policyId_ = policyId_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.name_ = name_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.creationPolicy_ = creationPolicyBuilder_ == null
              ? creationPolicy_
              : creationPolicyBuilder_.build();
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.creationFallbackPolicy_ = creationFallbackPolicyBuilder_ == null
              ? creationFallbackPolicy_
              : creationFallbackPolicyBuilder_.build();
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.replicationFallbackPolicy_ = replicationFallbackPolicyBuilder_ == null
              ? replicationFallbackPolicy_
              : replicationFallbackPolicyBuilder_.build();
          to_bitField0_ |= 0x00000010;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto.getDefaultInstance()) return this;
        if (other.hasPolicyId()) {
          setPolicyId(other.getPolicyId());
        }
        if (other.hasName()) {
          name_ = other.name_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        if (other.hasCreationPolicy()) {
          mergeCreationPolicy(other.getCreationPolicy());
        }
        if (other.hasCreationFallbackPolicy()) {
          mergeCreationFallbackPolicy(other.getCreationFallbackPolicy());
        }
        if (other.hasReplicationFallbackPolicy()) {
          mergeReplicationFallbackPolicy(other.getReplicationFallbackPolicy());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasPolicyId()) {
          return false;
        }
        if (!hasName()) {
          return false;
        }
        if (!hasCreationPolicy()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                policyId_ = input.readUInt32();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 18: {
                name_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 26: {
                input.readMessage(
                    getCreationPolicyFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              case 34: {
                input.readMessage(
                    getCreationFallbackPolicyFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000008;
                break;
              } // case 34
              case 42: {
                input.readMessage(
                    getReplicationFallbackPolicyFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000010;
                break;
              } // case 42
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int policyId_ ;
      /**
       * <code>required uint32 policyId = 1;</code>
       * @return Whether the policyId field is set.
       */
      @java.lang.Override
      public boolean hasPolicyId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required uint32 policyId = 1;</code>
       * @return The policyId.
       */
      @java.lang.Override
      public int getPolicyId() {
        return policyId_;
      }
      /**
       * <code>required uint32 policyId = 1;</code>
       * @param value The policyId to set.
       * @return This builder for chaining.
       */
      public Builder setPolicyId(int value) {
        policyId_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 policyId = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearPolicyId() {
        bitField0_ = (bitField0_ & ~0x00000001);
        policyId_ = 0;
        onChanged();
        return this;
      }

      private java.lang.Object name_ = "";
      /**
       * <code>required string name = 2;</code>
       * @return Whether the name field is set.
       */
      public boolean hasName() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required string name = 2;</code>
       * @return The name.
       */
      public java.lang.String getName() {
        java.lang.Object ref = name_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            name_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string name = 2;</code>
       * @return The bytes for name.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getNameBytes() {
        java.lang.Object ref = name_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          name_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string name = 2;</code>
       * @param value The name to set.
       * @return This builder for chaining.
       */
      public Builder setName(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        name_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required string name = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearName() {
        name_ = getDefaultInstance().getName();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <code>required string name = 2;</code>
       * @param value The bytes for name to set.
       * @return This builder for chaining.
       */
      public Builder setNameBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        name_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationPolicy_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> creationPolicyBuilder_;
      /**
       * <pre>
       * a list of storage types for storing the block replicas when creating a
       * block.
       * </pre>
       *
       * <code>required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;</code>
       * @return Whether the creationPolicy field is set.
       */
      public boolean hasCreationPolicy() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <pre>
       * a list of storage types for storing the block replicas when creating a
       * block.
       * </pre>
       *
       * <code>required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;</code>
       * @return The creationPolicy.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationPolicy() {
        if (creationPolicyBuilder_ == null) {
          return creationPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationPolicy_;
        } else {
          return creationPolicyBuilder_.getMessage();
        }
      }
      /**
       * <pre>
       * a list of storage types for storing the block replicas when creating a
       * block.
       * </pre>
       *
       * <code>required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;</code>
       */
      public Builder setCreationPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
        if (creationPolicyBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          creationPolicy_ = value;
        } else {
          creationPolicyBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * a list of storage types for storing the block replicas when creating a
       * block.
       * </pre>
       *
       * <code>required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;</code>
       */
      public Builder setCreationPolicy(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) {
        if (creationPolicyBuilder_ == null) {
          creationPolicy_ = builderForValue.build();
        } else {
          creationPolicyBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * a list of storage types for storing the block replicas when creating a
       * block.
       * </pre>
       *
       * <code>required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;</code>
       */
      public Builder mergeCreationPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
        if (creationPolicyBuilder_ == null) {
          if (((bitField0_ & 0x00000004) != 0) &&
            creationPolicy_ != null &&
            creationPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) {
            getCreationPolicyBuilder().mergeFrom(value);
          } else {
            creationPolicy_ = value;
          }
        } else {
          creationPolicyBuilder_.mergeFrom(value);
        }
        if (creationPolicy_ != null) {
          bitField0_ |= 0x00000004;
          onChanged();
        }
        return this;
      }
      /**
       * <pre>
       * a list of storage types for storing the block replicas when creating a
       * block.
       * </pre>
       *
       * <code>required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;</code>
       */
      public Builder clearCreationPolicy() {
        bitField0_ = (bitField0_ & ~0x00000004);
        creationPolicy_ = null;
        if (creationPolicyBuilder_ != null) {
          creationPolicyBuilder_.dispose();
          creationPolicyBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <pre>
       * a list of storage types for storing the block replicas when creating a
       * block.
       * </pre>
       *
       * <code>required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder getCreationPolicyBuilder() {
        bitField0_ |= 0x00000004;
        onChanged();
        return getCreationPolicyFieldBuilder().getBuilder();
      }
      /**
       * <pre>
       * a list of storage types for storing the block replicas when creating a
       * block.
       * </pre>
       *
       * <code>required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationPolicyOrBuilder() {
        if (creationPolicyBuilder_ != null) {
          return creationPolicyBuilder_.getMessageOrBuilder();
        } else {
          return creationPolicy_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationPolicy_;
        }
      }
      /**
       * <pre>
       * a list of storage types for storing the block replicas when creating a
       * block.
       * </pre>
       *
       * <code>required .hadoop.hdfs.StorageTypesProto creationPolicy = 3;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> 
          getCreationPolicyFieldBuilder() {
        if (creationPolicyBuilder_ == null) {
          creationPolicyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>(
                  getCreationPolicy(),
                  getParentForChildren(),
                  isClean());
          creationPolicy_ = null;
        }
        return creationPolicyBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto creationFallbackPolicy_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> creationFallbackPolicyBuilder_;
      /**
       * <pre>
       * A list of storage types for creation fallback storage.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;</code>
       * @return Whether the creationFallbackPolicy field is set.
       */
      public boolean hasCreationFallbackPolicy() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <pre>
       * A list of storage types for creation fallback storage.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;</code>
       * @return The creationFallbackPolicy.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getCreationFallbackPolicy() {
        if (creationFallbackPolicyBuilder_ == null) {
          return creationFallbackPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationFallbackPolicy_;
        } else {
          return creationFallbackPolicyBuilder_.getMessage();
        }
      }
      /**
       * <pre>
       * A list of storage types for creation fallback storage.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;</code>
       */
      public Builder setCreationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
        if (creationFallbackPolicyBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          creationFallbackPolicy_ = value;
        } else {
          creationFallbackPolicyBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * A list of storage types for creation fallback storage.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;</code>
       */
      public Builder setCreationFallbackPolicy(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) {
        if (creationFallbackPolicyBuilder_ == null) {
          creationFallbackPolicy_ = builderForValue.build();
        } else {
          creationFallbackPolicyBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * A list of storage types for creation fallback storage.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;</code>
       */
      public Builder mergeCreationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
        if (creationFallbackPolicyBuilder_ == null) {
          if (((bitField0_ & 0x00000008) != 0) &&
            creationFallbackPolicy_ != null &&
            creationFallbackPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) {
            getCreationFallbackPolicyBuilder().mergeFrom(value);
          } else {
            creationFallbackPolicy_ = value;
          }
        } else {
          creationFallbackPolicyBuilder_.mergeFrom(value);
        }
        if (creationFallbackPolicy_ != null) {
          bitField0_ |= 0x00000008;
          onChanged();
        }
        return this;
      }
      /**
       * <pre>
       * A list of storage types for creation fallback storage.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;</code>
       */
      public Builder clearCreationFallbackPolicy() {
        bitField0_ = (bitField0_ & ~0x00000008);
        creationFallbackPolicy_ = null;
        if (creationFallbackPolicyBuilder_ != null) {
          creationFallbackPolicyBuilder_.dispose();
          creationFallbackPolicyBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <pre>
       * A list of storage types for creation fallback storage.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder getCreationFallbackPolicyBuilder() {
        bitField0_ |= 0x00000008;
        onChanged();
        return getCreationFallbackPolicyFieldBuilder().getBuilder();
      }
      /**
       * <pre>
       * A list of storage types for creation fallback storage.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getCreationFallbackPolicyOrBuilder() {
        if (creationFallbackPolicyBuilder_ != null) {
          return creationFallbackPolicyBuilder_.getMessageOrBuilder();
        } else {
          return creationFallbackPolicy_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : creationFallbackPolicy_;
        }
      }
      /**
       * <pre>
       * A list of storage types for creation fallback storage.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.StorageTypesProto creationFallbackPolicy = 4;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> 
          getCreationFallbackPolicyFieldBuilder() {
        if (creationFallbackPolicyBuilder_ == null) {
          creationFallbackPolicyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>(
                  getCreationFallbackPolicy(),
                  getParentForChildren(),
                  isClean());
          creationFallbackPolicy_ = null;
        }
        return creationFallbackPolicyBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto replicationFallbackPolicy_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> replicationFallbackPolicyBuilder_;
      /**
       * <code>optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;</code>
       * @return Whether the replicationFallbackPolicy field is set.
       */
      public boolean hasReplicationFallbackPolicy() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;</code>
       * @return The replicationFallbackPolicy.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto getReplicationFallbackPolicy() {
        if (replicationFallbackPolicyBuilder_ == null) {
          return replicationFallbackPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : replicationFallbackPolicy_;
        } else {
          return replicationFallbackPolicyBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;</code>
       */
      public Builder setReplicationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
        if (replicationFallbackPolicyBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          replicationFallbackPolicy_ = value;
        } else {
          replicationFallbackPolicyBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;</code>
       */
      public Builder setReplicationFallbackPolicy(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder builderForValue) {
        if (replicationFallbackPolicyBuilder_ == null) {
          replicationFallbackPolicy_ = builderForValue.build();
        } else {
          replicationFallbackPolicyBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;</code>
       */
      public Builder mergeReplicationFallbackPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto value) {
        if (replicationFallbackPolicyBuilder_ == null) {
          if (((bitField0_ & 0x00000010) != 0) &&
            replicationFallbackPolicy_ != null &&
            replicationFallbackPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance()) {
            getReplicationFallbackPolicyBuilder().mergeFrom(value);
          } else {
            replicationFallbackPolicy_ = value;
          }
        } else {
          replicationFallbackPolicyBuilder_.mergeFrom(value);
        }
        if (replicationFallbackPolicy_ != null) {
          bitField0_ |= 0x00000010;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;</code>
       */
      public Builder clearReplicationFallbackPolicy() {
        bitField0_ = (bitField0_ & ~0x00000010);
        replicationFallbackPolicy_ = null;
        if (replicationFallbackPolicyBuilder_ != null) {
          replicationFallbackPolicyBuilder_.dispose();
          replicationFallbackPolicyBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder getReplicationFallbackPolicyBuilder() {
        bitField0_ |= 0x00000010;
        onChanged();
        return getReplicationFallbackPolicyFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder getReplicationFallbackPolicyOrBuilder() {
        if (replicationFallbackPolicyBuilder_ != null) {
          return replicationFallbackPolicyBuilder_.getMessageOrBuilder();
        } else {
          return replicationFallbackPolicy_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.getDefaultInstance() : replicationFallbackPolicy_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypesProto replicationFallbackPolicy = 5;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder> 
          getReplicationFallbackPolicyFieldBuilder() {
        if (replicationFallbackPolicyBuilder_ == null) {
          replicationFallbackPolicyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProtoOrBuilder>(
                  getReplicationFallbackPolicy(),
                  getParentForChildren(),
                  isClean());
          replicationFallbackPolicy_ = null;
        }
        return replicationFallbackPolicyBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockStoragePolicyProto)
    }
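
    // Builder usage sketch (field values are illustrative only): policyId, name and
    // creationPolicy are required fields, so build() throws until all three are set.
    //
    //   BlockStoragePolicyProto hot = BlockStoragePolicyProto.newBuilder()
    //       .setPolicyId(7)
    //       .setName("HOT")
    //       .setCreationPolicy(StorageTypesProto.newBuilder()
    //           .addStorageTypes(StorageTypeProto.DISK))
    //       .build();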

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockStoragePolicyProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<BlockStoragePolicyProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<BlockStoragePolicyProto>() {
      @java.lang.Override
      public BlockStoragePolicyProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<BlockStoragePolicyProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<BlockStoragePolicyProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

  public interface LocatedBlockProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.LocatedBlockProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.ExtendedBlockProto b = 1;</code>
     * @return Whether the b field is set.
     */
    boolean hasB();
    /**
     * <code>required .hadoop.hdfs.ExtendedBlockProto b = 1;</code>
     * @return The b.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB();
    /**
     * <code>required .hadoop.hdfs.ExtendedBlockProto b = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder();

    /**
     * <pre>
     * offset of first byte of block in the file
     * </pre>
     *
     * <code>required uint64 offset = 2;</code>
     * @return Whether the offset field is set.
     */
    boolean hasOffset();
    /**
     * <pre>
     * offset of first byte of block in the file
     * </pre>
     *
     * <code>required uint64 offset = 2;</code>
     * @return The offset.
     */
    long getOffset();

    /**
     * <pre>
     * Locations ordered by proximity to client ip
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> 
        getLocsList();
    /**
     * <pre>
     * Locations ordered by proximity to client ip
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index);
    /**
     * <pre>
     * Locations ordered by proximity to client ip
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
     */
    int getLocsCount();
    /**
     * <pre>
     * Locations ordered by proximity to client ip
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> 
        getLocsOrBuilderList();
    /**
     * <pre>
     * Locations ordered by proximity to client ip
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder(
        int index);

    /**
     * <pre>
     * true if all replicas of a block are corrupt, else false
     * </pre>
     *
     * <code>required bool corrupt = 4;</code>
     * @return Whether the corrupt field is set.
     */
    boolean hasCorrupt();
    /**
     * <pre>
     * true if all replicas of a block are corrupt, else false
     * </pre>
     *
     * <code>required bool corrupt = 4;</code>
     * @return The corrupt.
     */
    boolean getCorrupt();

    /**
     * <code>required .hadoop.common.TokenProto blockToken = 5;</code>
     * @return Whether the blockToken field is set.
     */
    boolean hasBlockToken();
    /**
     * <code>required .hadoop.common.TokenProto blockToken = 5;</code>
     * @return The blockToken.
     */
    org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockToken();
    /**
     * <code>required .hadoop.common.TokenProto blockToken = 5;</code>
     */
    org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokenOrBuilder();

    /**
     * <pre>
     * if a location in locs is cached
     * </pre>
     *
     * <code>repeated bool isCached = 6 [packed = true];</code>
     * @return A list containing the isCached.
     */
    java.util.List<java.lang.Boolean> getIsCachedList();
    /**
     * <pre>
     * if a location in locs is cached
     * </pre>
     *
     * <code>repeated bool isCached = 6 [packed = true];</code>
     * @return The count of isCached.
     */
    int getIsCachedCount();
    /**
     * <pre>
     * if a location in locs is cached
     * </pre>
     *
     * <code>repeated bool isCached = 6 [packed = true];</code>
     * @param index The index of the element to return.
     * @return The isCached at the given index.
     */
    boolean getIsCached(int index);

    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
     * @return A list containing the storageTypes.
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList();
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
     * @return The count of storageTypes.
     */
    int getStorageTypesCount();
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
     * @param index The index of the element to return.
     * @return The storageTypes at the given index.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index);

    /**
     * <code>repeated string storageIDs = 8;</code>
     * @return A list containing the storageIDs.
     */
    java.util.List<java.lang.String>
        getStorageIDsList();
    /**
     * <code>repeated string storageIDs = 8;</code>
     * @return The count of storageIDs.
     */
    int getStorageIDsCount();
    /**
     * <code>repeated string storageIDs = 8;</code>
     * @param index The index of the element to return.
     * @return The storageIDs at the given index.
     */
    java.lang.String getStorageIDs(int index);
    /**
     * <code>repeated string storageIDs = 8;</code>
     * @param index The index of the value to return.
     * @return The bytes of the storageIDs at the given index.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getStorageIDsBytes(int index);

    /**
     * <pre>
     * striped block related fields
     * </pre>
     *
     * <code>optional bytes blockIndices = 9;</code>
     * @return Whether the blockIndices field is set.
     */
    boolean hasBlockIndices();
    /**
     * <pre>
     * striped block related fields
     * </pre>
     *
     * <code>optional bytes blockIndices = 9;</code>
     * @return The blockIndices.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getBlockIndices();

    /**
     * <pre>
     * each internal block has a block token
     * </pre>
     *
     * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
     */
    java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> 
        getBlockTokensList();
    /**
     * <pre>
     * each internal block has a block token
     * </pre>
     *
     * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
     */
    org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockTokens(int index);
    /**
     * <pre>
     * each internal block has a block token
     * </pre>
     *
     * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
     */
    int getBlockTokensCount();
    /**
     * <pre>
     * each internal block has a block token
     * </pre>
     *
     * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
     */
    java.util.List<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> 
        getBlockTokensOrBuilderList();
    /**
     * <pre>
     * each internal block has a block token
     * </pre>
     *
     * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
     */
    org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokensOrBuilder(
        int index);
  }
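
  // Read-side sketch (variable names are illustrative): a LocatedBlockProto lists its replica
  // locations in proximity order, with a parallel isCached flag per location.
  //
  //   for (int i = 0; i < block.getLocsCount(); i++) {
  //     DatanodeInfoProto dn = block.getLocs(i);
  //     boolean cached = i < block.getIsCachedCount() && block.getIsCached(i);
  //   }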
  /**
   * <pre>
   **
   * A LocatedBlock gives information about a block and its location.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.LocatedBlockProto}
   */
  public static final class LocatedBlockProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.LocatedBlockProto)
      LocatedBlockProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use LocatedBlockProto.newBuilder() to construct.
    private LocatedBlockProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private LocatedBlockProto() {
      locs_ = java.util.Collections.emptyList();
      isCached_ = emptyBooleanList();
      storageTypes_ = java.util.Collections.emptyList();
      storageIDs_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
      blockIndices_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      blockTokens_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new LocatedBlockProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder.class);
    }

    private int bitField0_;
    public static final int B_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto b_;
    /**
     * <code>required .hadoop.hdfs.ExtendedBlockProto b = 1;</code>
     * @return Whether the b field is set.
     */
    @java.lang.Override
    public boolean hasB() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.ExtendedBlockProto b = 1;</code>
     * @return The b.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB() {
      return b_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : b_;
    }
    /**
     * <code>required .hadoop.hdfs.ExtendedBlockProto b = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder() {
      return b_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : b_;
    }

    public static final int OFFSET_FIELD_NUMBER = 2;
    private long offset_ = 0L;
    /**
     * <pre>
     * offset of first byte of block in the file
     * </pre>
     *
     * <code>required uint64 offset = 2;</code>
     * @return Whether the offset field is set.
     */
    @java.lang.Override
    public boolean hasOffset() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <pre>
     * offset of first byte of block in the file
     * </pre>
     *
     * <code>required uint64 offset = 2;</code>
     * @return The offset.
     */
    @java.lang.Override
    public long getOffset() {
      return offset_;
    }

    public static final int LOCS_FIELD_NUMBER = 3;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> locs_;
    /**
     * <pre>
     * Locations ordered by proximity to client ip
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getLocsList() {
      return locs_;
    }
    /**
     * <pre>
     * Locations ordered by proximity to client ip
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> 
        getLocsOrBuilderList() {
      return locs_;
    }
    /**
     * <pre>
     * Locations ordered by proximity to client ip
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
     */
    @java.lang.Override
    public int getLocsCount() {
      return locs_.size();
    }
    /**
     * <pre>
     * Locations ordered by proximity to client ip
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index) {
      return locs_.get(index);
    }
    /**
     * <pre>
     * Locations ordered by proximity to client ip
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder(
        int index) {
      return locs_.get(index);
    }

    public static final int CORRUPT_FIELD_NUMBER = 4;
    private boolean corrupt_ = false;
    /**
     * <pre>
     * true if all replicas of a block are corrupt, else false
     * </pre>
     *
     * <code>required bool corrupt = 4;</code>
     * @return Whether the corrupt field is set.
     */
    @java.lang.Override
    public boolean hasCorrupt() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <pre>
     * true if all replicas of a block are corrupt, else false
     * </pre>
     *
     * <code>required bool corrupt = 4;</code>
     * @return The corrupt.
     */
    @java.lang.Override
    public boolean getCorrupt() {
      return corrupt_;
    }

    public static final int BLOCKTOKEN_FIELD_NUMBER = 5;
    private org.apache.hadoop.security.proto.SecurityProtos.TokenProto blockToken_;
    /**
     * <code>required .hadoop.common.TokenProto blockToken = 5;</code>
     * @return Whether the blockToken field is set.
     */
    @java.lang.Override
    public boolean hasBlockToken() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>required .hadoop.common.TokenProto blockToken = 5;</code>
     * @return The blockToken.
     */
    @java.lang.Override
    public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockToken() {
      return blockToken_ == null ? org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : blockToken_;
    }
    /**
     * <code>required .hadoop.common.TokenProto blockToken = 5;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokenOrBuilder() {
      return blockToken_ == null ? org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : blockToken_;
    }

    public static final int ISCACHED_FIELD_NUMBER = 6;
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.Internal.BooleanList isCached_ =
        emptyBooleanList();
    /**
     * <pre>
     * if a location in locs is cached
     * </pre>
     *
     * <code>repeated bool isCached = 6 [packed = true];</code>
     * @return A list containing the isCached.
     */
    @java.lang.Override
    public java.util.List<java.lang.Boolean>
        getIsCachedList() {
      return isCached_;
    }
    /**
     * <pre>
     * if a location in locs is cached
     * </pre>
     *
     * <code>repeated bool isCached = 6 [packed = true];</code>
     * @return The count of isCached.
     */
    public int getIsCachedCount() {
      return isCached_.size();
    }
    /**
     * <pre>
     * if a location in locs is cached
     * </pre>
     *
     * <code>repeated bool isCached = 6 [packed = true];</code>
     * @param index The index of the element to return.
     * @return The isCached at the given index.
     */
    public boolean getIsCached(int index) {
      return isCached_.getBoolean(index);
    }
    private int isCachedMemoizedSerializedSize = -1;

    public static final int STORAGETYPES_FIELD_NUMBER = 7;
    @SuppressWarnings("serial")
    private java.util.List<java.lang.Integer> storageTypes_;
    private static final org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
        java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> storageTypes_converter_ =
            new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
                java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>() {
              public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto convert(java.lang.Integer from) {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(from);
                return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
              }
            };
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
     * @return A list containing the storageTypes.
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList() {
      return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
          java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(storageTypes_, storageTypes_converter_);
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
     * @return The count of storageTypes.
     */
    @java.lang.Override
    public int getStorageTypesCount() {
      return storageTypes_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
     * @param index The index of the element to return.
     * @return The storageTypes at the given index.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) {
      return storageTypes_converter_.convert(storageTypes_.get(index));
    }

    public static final int STORAGEIDS_FIELD_NUMBER = 8;
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList storageIDs_ =
        org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
    /**
     * <code>repeated string storageIDs = 8;</code>
     * @return A list containing the storageIDs.
     */
    public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
        getStorageIDsList() {
      return storageIDs_;
    }
    /**
     * <code>repeated string storageIDs = 8;</code>
     * @return The count of storageIDs.
     */
    public int getStorageIDsCount() {
      return storageIDs_.size();
    }
    /**
     * <code>repeated string storageIDs = 8;</code>
     * @param index The index of the element to return.
     * @return The storageIDs at the given index.
     */
    public java.lang.String getStorageIDs(int index) {
      return storageIDs_.get(index);
    }
    /**
     * <code>repeated string storageIDs = 8;</code>
     * @param index The index of the value to return.
     * @return The bytes of the storageIDs at the given index.
     */
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getStorageIDsBytes(int index) {
      return storageIDs_.getByteString(index);
    }

    public static final int BLOCKINDICES_FIELD_NUMBER = 9;
    private org.apache.hadoop.thirdparty.protobuf.ByteString blockIndices_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <pre>
     * striped block related fields
     * </pre>
     *
     * <code>optional bytes blockIndices = 9;</code>
     * @return Whether the blockIndices field is set.
     */
    @java.lang.Override
    public boolean hasBlockIndices() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <pre>
     * striped block related fields
     * </pre>
     *
     * <code>optional bytes blockIndices = 9;</code>
     * @return The blockIndices.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockIndices() {
      return blockIndices_;
    }

    public static final int BLOCKTOKENS_FIELD_NUMBER = 10;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> blockTokens_;
    /**
     * <pre>
     * each internal block has a block token
     * </pre>
     *
     * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> getBlockTokensList() {
      return blockTokens_;
    }
    /**
     * <pre>
     * each internal block has a block token
     * </pre>
     *
     * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> 
        getBlockTokensOrBuilderList() {
      return blockTokens_;
    }
    /**
     * <pre>
     * each internal block has a block token
     * </pre>
     *
     * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
     */
    @java.lang.Override
    public int getBlockTokensCount() {
      return blockTokens_.size();
    }
    /**
     * <pre>
     * each internal block has a block token
     * </pre>
     *
     * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockTokens(int index) {
      return blockTokens_.get(index);
    }
    /**
     * <pre>
     * each internal block has a block token
     * </pre>
     *
     * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokensOrBuilder(
        int index) {
      return blockTokens_.get(index);
    }
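
    // Illustrative sketch (not generated by protoc): shows how a reader might pair the
    // parallel repeated fields locs (3) and isCached (6) using the accessors above.
    // The helper and its name are assumptions added for documentation only.
    private static int countCachedLocations(
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto block) {
      int cached = 0;
      // isCached is expected to run parallel to locs when the writer populated both lists
      int n = java.lang.Math.min(block.getLocsCount(), block.getIsCachedCount());
      for (int i = 0; i < n; i++) {
        if (block.getIsCached(i)) {
          cached++;
        }
      }
      return cached;
    }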

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasB()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasOffset()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasCorrupt()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasBlockToken()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getB().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      for (int i = 0; i < getLocsCount(); i++) {
        if (!getLocs(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (!getBlockToken().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      for (int i = 0; i < getBlockTokensCount(); i++) {
        if (!getBlockTokens(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getB());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(2, offset_);
      }
      for (int i = 0; i < locs_.size(); i++) {
        output.writeMessage(3, locs_.get(i));
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeBool(4, corrupt_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeMessage(5, getBlockToken());
      }
      if (getIsCachedList().size() > 0) {
        output.writeUInt32NoTag(50);
        output.writeUInt32NoTag(isCachedMemoizedSerializedSize);
      }
      for (int i = 0; i < isCached_.size(); i++) {
        output.writeBoolNoTag(isCached_.getBoolean(i));
      }
      for (int i = 0; i < storageTypes_.size(); i++) {
        output.writeEnum(7, storageTypes_.get(i));
      }
      for (int i = 0; i < storageIDs_.size(); i++) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 8, storageIDs_.getRaw(i));
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeBytes(9, blockIndices_);
      }
      for (int i = 0; i < blockTokens_.size(); i++) {
        output.writeMessage(10, blockTokens_.get(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getB());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(2, offset_);
      }
      for (int i = 0; i < locs_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(3, locs_.get(i));
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(4, corrupt_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(5, getBlockToken());
      }
      {
        int dataSize = 0;
        dataSize = 1 * getIsCachedList().size();
        size += dataSize;
        if (!getIsCachedList().isEmpty()) {
          size += 1;
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
              .computeInt32SizeNoTag(dataSize);
        }
        isCachedMemoizedSerializedSize = dataSize;
      }
      {
        int dataSize = 0;
        for (int i = 0; i < storageTypes_.size(); i++) {
          dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeEnumSizeNoTag(storageTypes_.get(i));
        }
        size += dataSize;
        size += 1 * storageTypes_.size();
      }
      {
        int dataSize = 0;
        for (int i = 0; i < storageIDs_.size(); i++) {
          dataSize += computeStringSizeNoTag(storageIDs_.getRaw(i));
        }
        size += dataSize;
        size += 1 * getStorageIDsList().size();
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(9, blockIndices_);
      }
      for (int i = 0; i < blockTokens_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(10, blockTokens_.get(i));
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }
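
    // Illustrative sketch (not generated by protoc): serializes this message with writeTo(...)
    // and parses it back. The helper and its name are assumptions added for documentation;
    // writeTo, getSerializedSize and parseFrom are the generated API defined in this class.
    private static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto roundTripExample(
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto block)
        throws java.io.IOException {
      java.io.ByteArrayOutputStream out =
          new java.io.ByteArrayOutputStream(block.getSerializedSize());
      org.apache.hadoop.thirdparty.protobuf.CodedOutputStream coded =
          org.apache.hadoop.thirdparty.protobuf.CodedOutputStream.newInstance(out);
      block.writeTo(coded);   // packed fields such as isCached reuse the memoized size computed above
      coded.flush();
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.parseFrom(out.toByteArray());
    }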

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) obj;

      if (hasB() != other.hasB()) return false;
      if (hasB()) {
        if (!getB()
            .equals(other.getB())) return false;
      }
      if (hasOffset() != other.hasOffset()) return false;
      if (hasOffset()) {
        if (getOffset()
            != other.getOffset()) return false;
      }
      if (!getLocsList()
          .equals(other.getLocsList())) return false;
      if (hasCorrupt() != other.hasCorrupt()) return false;
      if (hasCorrupt()) {
        if (getCorrupt()
            != other.getCorrupt()) return false;
      }
      if (hasBlockToken() != other.hasBlockToken()) return false;
      if (hasBlockToken()) {
        if (!getBlockToken()
            .equals(other.getBlockToken())) return false;
      }
      if (!getIsCachedList()
          .equals(other.getIsCachedList())) return false;
      if (!storageTypes_.equals(other.storageTypes_)) return false;
      if (!getStorageIDsList()
          .equals(other.getStorageIDsList())) return false;
      if (hasBlockIndices() != other.hasBlockIndices()) return false;
      if (hasBlockIndices()) {
        if (!getBlockIndices()
            .equals(other.getBlockIndices())) return false;
      }
      if (!getBlockTokensList()
          .equals(other.getBlockTokensList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasB()) {
        hash = (37 * hash) + B_FIELD_NUMBER;
        hash = (53 * hash) + getB().hashCode();
      }
      if (hasOffset()) {
        hash = (37 * hash) + OFFSET_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getOffset());
      }
      if (getLocsCount() > 0) {
        hash = (37 * hash) + LOCS_FIELD_NUMBER;
        hash = (53 * hash) + getLocsList().hashCode();
      }
      if (hasCorrupt()) {
        hash = (37 * hash) + CORRUPT_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getCorrupt());
      }
      if (hasBlockToken()) {
        hash = (37 * hash) + BLOCKTOKEN_FIELD_NUMBER;
        hash = (53 * hash) + getBlockToken().hashCode();
      }
      if (getIsCachedCount() > 0) {
        hash = (37 * hash) + ISCACHED_FIELD_NUMBER;
        hash = (53 * hash) + getIsCachedList().hashCode();
      }
      if (getStorageTypesCount() > 0) {
        hash = (37 * hash) + STORAGETYPES_FIELD_NUMBER;
        hash = (53 * hash) + storageTypes_.hashCode();
      }
      if (getStorageIDsCount() > 0) {
        hash = (37 * hash) + STORAGEIDS_FIELD_NUMBER;
        hash = (53 * hash) + getStorageIDsList().hashCode();
      }
      if (hasBlockIndices()) {
        hash = (37 * hash) + BLOCKINDICES_FIELD_NUMBER;
        hash = (53 * hash) + getBlockIndices().hashCode();
      }
      if (getBlockTokensCount() > 0) {
        hash = (37 * hash) + BLOCKTOKENS_FIELD_NUMBER;
        hash = (53 * hash) + getBlockTokensList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
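
    // Illustrative sketch (not generated by protoc): reads one length-delimited LocatedBlockProto
    // from an InputStream. The helper and its name are assumptions for documentation; the
    // parseDelimitedFrom call is the generated API defined above and returns null at end of stream.
    private static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto readOneExample(
        java.io.InputStream in) throws java.io.IOException {
      // A successful parse has already verified the required fields (b, offset, corrupt, blockToken).
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.parseDelimitedFrom(in);
    }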

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }
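
    // Illustrative sketch (not generated by protoc): assembles a LocatedBlockProto through the
    // Builder defined below. The helper, its name, and the pre-built extendedBlock/token/location
    // arguments are assumptions for documentation; build() throws if any of the required fields
    // (b, offset, corrupt, blockToken) is left unset.
    private static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto buildExample(
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto extendedBlock,
        org.apache.hadoop.security.proto.SecurityProtos.TokenProto token,
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto location) {
      return newBuilder()
          .setB(extendedBlock)
          .setOffset(0L)          // offset of the block's first byte within the file
          .setCorrupt(false)
          .setBlockToken(token)
          .addLocs(location)      // locations are ordered by proximity to the client
          .build();
    }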

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * A LocatedBlock gives information about a block and its location.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.LocatedBlockProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.LocatedBlockProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getBFieldBuilder();
          getLocsFieldBuilder();
          getBlockTokenFieldBuilder();
          getBlockTokensFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        b_ = null;
        if (bBuilder_ != null) {
          bBuilder_.dispose();
          bBuilder_ = null;
        }
        offset_ = 0L;
        if (locsBuilder_ == null) {
          locs_ = java.util.Collections.emptyList();
        } else {
          locs_ = null;
          locsBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000004);
        corrupt_ = false;
        blockToken_ = null;
        if (blockTokenBuilder_ != null) {
          blockTokenBuilder_.dispose();
          blockTokenBuilder_ = null;
        }
        isCached_ = emptyBooleanList();
        storageTypes_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000040);
        storageIDs_ =
            org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        blockIndices_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        if (blockTokensBuilder_ == null) {
          blockTokens_ = java.util.Collections.emptyList();
        } else {
          blockTokens_ = null;
          blockTokensBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000200);
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlockProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result) {
        if (locsBuilder_ == null) {
          if (((bitField0_ & 0x00000004) != 0)) {
            locs_ = java.util.Collections.unmodifiableList(locs_);
            bitField0_ = (bitField0_ & ~0x00000004);
          }
          result.locs_ = locs_;
        } else {
          result.locs_ = locsBuilder_.build();
        }
        if (((bitField0_ & 0x00000040) != 0)) {
          storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_);
          bitField0_ = (bitField0_ & ~0x00000040);
        }
        result.storageTypes_ = storageTypes_;
        if (blockTokensBuilder_ == null) {
          if (((bitField0_ & 0x00000200) != 0)) {
            blockTokens_ = java.util.Collections.unmodifiableList(blockTokens_);
            bitField0_ = (bitField0_ & ~0x00000200);
          }
          result.blockTokens_ = blockTokens_;
        } else {
          result.blockTokens_ = blockTokensBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.b_ = bBuilder_ == null
              ? b_
              : bBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.offset_ = offset_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.corrupt_ = corrupt_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.blockToken_ = blockTokenBuilder_ == null
              ? blockToken_
              : blockTokenBuilder_.build();
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          isCached_.makeImmutable();
          result.isCached_ = isCached_;
        }
        if (((from_bitField0_ & 0x00000080) != 0)) {
          storageIDs_.makeImmutable();
          result.storageIDs_ = storageIDs_;
        }
        if (((from_bitField0_ & 0x00000100) != 0)) {
          result.blockIndices_ = blockIndices_;
          to_bitField0_ |= 0x00000010;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) return this;
        if (other.hasB()) {
          mergeB(other.getB());
        }
        if (other.hasOffset()) {
          setOffset(other.getOffset());
        }
        if (locsBuilder_ == null) {
          if (!other.locs_.isEmpty()) {
            if (locs_.isEmpty()) {
              locs_ = other.locs_;
              bitField0_ = (bitField0_ & ~0x00000004);
            } else {
              ensureLocsIsMutable();
              locs_.addAll(other.locs_);
            }
            onChanged();
          }
        } else {
          if (!other.locs_.isEmpty()) {
            if (locsBuilder_.isEmpty()) {
              locsBuilder_.dispose();
              locsBuilder_ = null;
              locs_ = other.locs_;
              bitField0_ = (bitField0_ & ~0x00000004);
              locsBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getLocsFieldBuilder() : null;
            } else {
              locsBuilder_.addAllMessages(other.locs_);
            }
          }
        }
        if (other.hasCorrupt()) {
          setCorrupt(other.getCorrupt());
        }
        if (other.hasBlockToken()) {
          mergeBlockToken(other.getBlockToken());
        }
        if (!other.isCached_.isEmpty()) {
          if (isCached_.isEmpty()) {
            isCached_ = other.isCached_;
            isCached_.makeImmutable();
            bitField0_ |= 0x00000020;
          } else {
            ensureIsCachedIsMutable();
            isCached_.addAll(other.isCached_);
          }
          onChanged();
        }
        if (!other.storageTypes_.isEmpty()) {
          if (storageTypes_.isEmpty()) {
            storageTypes_ = other.storageTypes_;
            bitField0_ = (bitField0_ & ~0x00000040);
          } else {
            ensureStorageTypesIsMutable();
            storageTypes_.addAll(other.storageTypes_);
          }
          onChanged();
        }
        if (!other.storageIDs_.isEmpty()) {
          if (storageIDs_.isEmpty()) {
            storageIDs_ = other.storageIDs_;
            bitField0_ |= 0x00000080;
          } else {
            ensureStorageIDsIsMutable();
            storageIDs_.addAll(other.storageIDs_);
          }
          onChanged();
        }
        if (other.hasBlockIndices()) {
          setBlockIndices(other.getBlockIndices());
        }
        if (blockTokensBuilder_ == null) {
          if (!other.blockTokens_.isEmpty()) {
            if (blockTokens_.isEmpty()) {
              blockTokens_ = other.blockTokens_;
              bitField0_ = (bitField0_ & ~0x00000200);
            } else {
              ensureBlockTokensIsMutable();
              blockTokens_.addAll(other.blockTokens_);
            }
            onChanged();
          }
        } else {
          if (!other.blockTokens_.isEmpty()) {
            if (blockTokensBuilder_.isEmpty()) {
              blockTokensBuilder_.dispose();
              blockTokensBuilder_ = null;
              blockTokens_ = other.blockTokens_;
              bitField0_ = (bitField0_ & ~0x00000200);
              blockTokensBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getBlockTokensFieldBuilder() : null;
            } else {
              blockTokensBuilder_.addAllMessages(other.blockTokens_);
            }
          }
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasB()) {
          return false;
        }
        if (!hasOffset()) {
          return false;
        }
        if (!hasCorrupt()) {
          return false;
        }
        if (!hasBlockToken()) {
          return false;
        }
        if (!getB().isInitialized()) {
          return false;
        }
        for (int i = 0; i < getLocsCount(); i++) {
          if (!getLocs(i).isInitialized()) {
            return false;
          }
        }
        if (!getBlockToken().isInitialized()) {
          return false;
        }
        for (int i = 0; i < getBlockTokensCount(); i++) {
          if (!getBlockTokens(i).isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getBFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                offset_ = input.readUInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 26: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER,
                        extensionRegistry);
                if (locsBuilder_ == null) {
                  ensureLocsIsMutable();
                  locs_.add(m);
                } else {
                  locsBuilder_.addMessage(m);
                }
                break;
              } // case 26
              case 32: {
                corrupt_ = input.readBool();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
              case 42: {
                input.readMessage(
                    getBlockTokenFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000010;
                break;
              } // case 42
              case 48: {
                boolean v = input.readBool();
                ensureIsCachedIsMutable();
                isCached_.addBoolean(v);
                break;
              } // case 48
              case 50: {
                int length = input.readRawVarint32();
                int limit = input.pushLimit(length);
                int alloc = length > 4096 ? 4096 : length;
                ensureIsCachedIsMutable(alloc / 1);
                while (input.getBytesUntilLimit() > 0) {
                  isCached_.addBoolean(input.readBool());
                }
                input.popLimit(limit);
                break;
              } // case 50
              case 56: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(7, tmpRaw);
                } else {
                  ensureStorageTypesIsMutable();
                  storageTypes_.add(tmpRaw);
                }
                break;
              } // case 56
              case 58: {
                int length = input.readRawVarint32();
                int oldLimit = input.pushLimit(length);
                while (input.getBytesUntilLimit() > 0) {
                  int tmpRaw = input.readEnum();
                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue =
                      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw);
                  if (tmpValue == null) {
                    mergeUnknownVarintField(7, tmpRaw);
                  } else {
                    ensureStorageTypesIsMutable();
                    storageTypes_.add(tmpRaw);
                  }
                }
                input.popLimit(oldLimit);
                break;
              } // case 58
              case 66: {
                org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
                ensureStorageIDsIsMutable();
                storageIDs_.add(bs);
                break;
              } // case 66
              case 74: {
                blockIndices_ = input.readBytes();
                bitField0_ |= 0x00000100;
                break;
              } // case 74
              case 82: {
                org.apache.hadoop.security.proto.SecurityProtos.TokenProto m =
                    input.readMessage(
                        org.apache.hadoop.security.proto.SecurityProtos.TokenProto.PARSER,
                        extensionRegistry);
                if (blockTokensBuilder_ == null) {
                  ensureBlockTokensIsMutable();
                  blockTokens_.add(m);
                } else {
                  blockTokensBuilder_.addMessage(m);
                }
                break;
              } // case 82
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto b_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> bBuilder_;
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto b = 1;</code>
       * @return Whether the b field is set.
       */
      public boolean hasB() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto b = 1;</code>
       * @return The b.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getB() {
        if (bBuilder_ == null) {
          return b_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : b_;
        } else {
          return bBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto b = 1;</code>
       */
      public Builder setB(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
        if (bBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          b_ = value;
        } else {
          bBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto b = 1;</code>
       */
      public Builder setB(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
        if (bBuilder_ == null) {
          b_ = builderForValue.build();
        } else {
          bBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto b = 1;</code>
       */
      public Builder mergeB(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
        if (bBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            b_ != null &&
            b_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
            getBBuilder().mergeFrom(value);
          } else {
            b_ = value;
          }
        } else {
          bBuilder_.mergeFrom(value);
        }
        if (b_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto b = 1;</code>
       */
      public Builder clearB() {
        bitField0_ = (bitField0_ & ~0x00000001);
        b_ = null;
        if (bBuilder_ != null) {
          bBuilder_.dispose();
          bBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto b = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getBFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto b = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBOrBuilder() {
        if (bBuilder_ != null) {
          return bBuilder_.getMessageOrBuilder();
        } else {
          return b_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : b_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto b = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> 
          getBFieldBuilder() {
        if (bBuilder_ == null) {
          bBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
                  getB(),
                  getParentForChildren(),
                  isClean());
          b_ = null;
        }
        return bBuilder_;
      }

      private long offset_ ;
      /**
       * <pre>
       * offset of first byte of block in the file
       * </pre>
       *
       * <code>required uint64 offset = 2;</code>
       * @return Whether the offset field is set.
       */
      @java.lang.Override
      public boolean hasOffset() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       * offset of first byte of block in the file
       * </pre>
       *
       * <code>required uint64 offset = 2;</code>
       * @return The offset.
       */
      @java.lang.Override
      public long getOffset() {
        return offset_;
      }
      /**
       * <pre>
       * offset of first byte of block in the file
       * </pre>
       *
       * <code>required uint64 offset = 2;</code>
       * @param value The offset to set.
       * @return This builder for chaining.
       */
      public Builder setOffset(long value) {

        offset_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * offset of first byte of block in the file
       * </pre>
       *
       * <code>required uint64 offset = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearOffset() {
        bitField0_ = (bitField0_ & ~0x00000002);
        offset_ = 0L;
        onChanged();
        return this;
      }

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> locs_ =
        java.util.Collections.emptyList();
      private void ensureLocsIsMutable() {
        if (!((bitField0_ & 0x00000004) != 0)) {
          locs_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>(locs_);
          bitField0_ |= 0x00000004;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> locsBuilder_;

      /**
       * <pre>
       * Locations ordered by proximity to client ip
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getLocsList() {
        if (locsBuilder_ == null) {
          return java.util.Collections.unmodifiableList(locs_);
        } else {
          return locsBuilder_.getMessageList();
        }
      }
      /**
       * <pre>
       * Locations ordered by proximity to client ip
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
       */
      public int getLocsCount() {
        if (locsBuilder_ == null) {
          return locs_.size();
        } else {
          return locsBuilder_.getCount();
        }
      }
      /**
       * <pre>
       * Locations ordered by proximity to client ip
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getLocs(int index) {
        if (locsBuilder_ == null) {
          return locs_.get(index);
        } else {
          return locsBuilder_.getMessage(index);
        }
      }
      /**
       * <pre>
       * Locations ordered by proximity to client ip
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
       */
      public Builder setLocs(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
        if (locsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureLocsIsMutable();
          locs_.set(index, value);
          onChanged();
        } else {
          locsBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <pre>
       * Locations ordered by proximity to client ip
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
       */
      public Builder setLocs(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
        if (locsBuilder_ == null) {
          ensureLocsIsMutable();
          locs_.set(index, builderForValue.build());
          onChanged();
        } else {
          locsBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <pre>
       * Locations ordered by proximity to client ip
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
       */
      public Builder addLocs(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
        if (locsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureLocsIsMutable();
          locs_.add(value);
          onChanged();
        } else {
          locsBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <pre>
       * Locations ordered by proximity to client ip
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
       */
      public Builder addLocs(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
        if (locsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureLocsIsMutable();
          locs_.add(index, value);
          onChanged();
        } else {
          locsBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <pre>
       * Locations ordered by proximity to client ip
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
       */
      public Builder addLocs(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
        if (locsBuilder_ == null) {
          ensureLocsIsMutable();
          locs_.add(builderForValue.build());
          onChanged();
        } else {
          locsBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <pre>
       * Locations ordered by proximity to client ip
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
       */
      public Builder addLocs(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
        if (locsBuilder_ == null) {
          ensureLocsIsMutable();
          locs_.add(index, builderForValue.build());
          onChanged();
        } else {
          locsBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <pre>
       * Locations ordered by proximity to client ip
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
       */
      public Builder addAllLocs(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> values) {
        if (locsBuilder_ == null) {
          ensureLocsIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, locs_);
          onChanged();
        } else {
          locsBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <pre>
       * Locations ordered by proximity to client ip
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
       */
      public Builder clearLocs() {
        if (locsBuilder_ == null) {
          locs_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000004);
          onChanged();
        } else {
          locsBuilder_.clear();
        }
        return this;
      }
      /**
       * <pre>
       * Locations ordered by proximity to client ip
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
       */
      public Builder removeLocs(int index) {
        if (locsBuilder_ == null) {
          ensureLocsIsMutable();
          locs_.remove(index);
          onChanged();
        } else {
          locsBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <pre>
       * Locations ordered by proximity to client ip
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getLocsBuilder(
          int index) {
        return getLocsFieldBuilder().getBuilder(index);
      }
      /**
       * <pre>
       * Locations ordered by proximity to client ip
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getLocsOrBuilder(
          int index) {
        if (locsBuilder_ == null) {
          return locs_.get(index);
        } else {
          return locsBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <pre>
       * Locations ordered by proximity to client ip
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> 
           getLocsOrBuilderList() {
        if (locsBuilder_ != null) {
          return locsBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(locs_);
        }
      }
      /**
       * <pre>
       * Locations ordered by proximity to client ip
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addLocsBuilder() {
        return getLocsFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
      }
      /**
       * <pre>
       * Locations ordered by proximity to client ip
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addLocsBuilder(
          int index) {
        return getLocsFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
      }
      /**
       * <pre>
       * Locations ordered by proximity to client ip
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto locs = 3;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder> 
           getLocsBuilderList() {
        return getLocsFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> 
          getLocsFieldBuilder() {
        if (locsBuilder_ == null) {
          locsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
                  locs_,
                  ((bitField0_ & 0x00000004) != 0),
                  getParentForChildren(),
                  isClean());
          locs_ = null;
        }
        return locsBuilder_;
      }

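      // Usage sketch (illustrative only, not part of the generated API): the
      // repeated "locs" field can be populated with prebuilt messages or through
      // nested builders. "dn" is an assumed, already-populated DatanodeInfoProto;
      // the remaining required fields of LocatedBlockProto are omitted here.
      //
      //   HdfsProtos.DatanodeInfoProto dn = ...;
      //   HdfsProtos.LocatedBlockProto.Builder blk =
      //       HdfsProtos.LocatedBlockProto.newBuilder();
      //   blk.addLocs(dn);                                   // append a prebuilt location
      //   blk.addLocsBuilder().mergeFrom(dn);                // or edit a nested builder in place
      //   blk.addAllLocs(java.util.Arrays.asList(dn, dn));   // bulk append
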
      private boolean corrupt_;
      /**
       * <pre>
       * true if all replicas of a block are corrupt, else false
       * </pre>
       *
       * <code>required bool corrupt = 4;</code>
       * @return Whether the corrupt field is set.
       */
      @java.lang.Override
      public boolean hasCorrupt() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <pre>
       * true if all replicas of a block are corrupt, else false
       * </pre>
       *
       * <code>required bool corrupt = 4;</code>
       * @return The corrupt.
       */
      @java.lang.Override
      public boolean getCorrupt() {
        return corrupt_;
      }
      /**
       * <pre>
       * true if all replicas of a block are corrupt, else false
       * </pre>
       *
       * <code>required bool corrupt = 4;</code>
       * @param value The corrupt to set.
       * @return This builder for chaining.
       */
      public Builder setCorrupt(boolean value) {

        corrupt_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * true if all replicas of a block are corrupt, else false
       * </pre>
       *
       * <code>required bool corrupt = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearCorrupt() {
        bitField0_ = (bitField0_ & ~0x00000008);
        corrupt_ = false;
        onChanged();
        return this;
      }

      private org.apache.hadoop.security.proto.SecurityProtos.TokenProto blockToken_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> blockTokenBuilder_;
      /**
       * <code>required .hadoop.common.TokenProto blockToken = 5;</code>
       * @return Whether the blockToken field is set.
       */
      public boolean hasBlockToken() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>required .hadoop.common.TokenProto blockToken = 5;</code>
       * @return The blockToken.
       */
      public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockToken() {
        if (blockTokenBuilder_ == null) {
          return blockToken_ == null ? org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : blockToken_;
        } else {
          return blockTokenBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.common.TokenProto blockToken = 5;</code>
       */
      public Builder setBlockToken(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
        if (blockTokenBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          blockToken_ = value;
        } else {
          blockTokenBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.common.TokenProto blockToken = 5;</code>
       */
      public Builder setBlockToken(
          org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
        if (blockTokenBuilder_ == null) {
          blockToken_ = builderForValue.build();
        } else {
          blockTokenBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.common.TokenProto blockToken = 5;</code>
       */
      public Builder mergeBlockToken(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
        if (blockTokenBuilder_ == null) {
          if (((bitField0_ & 0x00000010) != 0) &&
            blockToken_ != null &&
            blockToken_ != org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance()) {
            getBlockTokenBuilder().mergeFrom(value);
          } else {
            blockToken_ = value;
          }
        } else {
          blockTokenBuilder_.mergeFrom(value);
        }
        if (blockToken_ != null) {
          bitField0_ |= 0x00000010;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.common.TokenProto blockToken = 5;</code>
       */
      public Builder clearBlockToken() {
        bitField0_ = (bitField0_ & ~0x00000010);
        blockToken_ = null;
        if (blockTokenBuilder_ != null) {
          blockTokenBuilder_.dispose();
          blockTokenBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.common.TokenProto blockToken = 5;</code>
       */
      public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder getBlockTokenBuilder() {
        bitField0_ |= 0x00000010;
        onChanged();
        return getBlockTokenFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.common.TokenProto blockToken = 5;</code>
       */
      public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokenOrBuilder() {
        if (blockTokenBuilder_ != null) {
          return blockTokenBuilder_.getMessageOrBuilder();
        } else {
          return blockToken_ == null ?
              org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : blockToken_;
        }
      }
      /**
       * <code>required .hadoop.common.TokenProto blockToken = 5;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> 
          getBlockTokenFieldBuilder() {
        if (blockTokenBuilder_ == null) {
          blockTokenBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>(
                  getBlockToken(),
                  getParentForChildren(),
                  isClean());
          blockToken_ = null;
        }
        return blockTokenBuilder_;
      }

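      // Usage sketch (illustrative): setBlockToken replaces the nested TokenProto
      // wholesale, mergeBlockToken merges field-by-field into any value already
      // present, and getBlockTokenBuilder exposes the nested builder for in-place
      // edits. "blk" is an assumed LocatedBlockProto.Builder and "token" an
      // assumed, fully populated SecurityProtos.TokenProto.
      //
      //   blk.setBlockToken(token);                      // replace the current value
      //   blk.mergeBlockToken(token);                    // merge into the current value
      //   blk.getBlockTokenBuilder().mergeFrom(token);   // mutate the nested builder directly
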
      private org.apache.hadoop.thirdparty.protobuf.Internal.BooleanList isCached_ = emptyBooleanList();
      private void ensureIsCachedIsMutable() {
        if (!isCached_.isModifiable()) {
          isCached_ = makeMutableCopy(isCached_);
        }
        bitField0_ |= 0x00000020;
      }
      private void ensureIsCachedIsMutable(int capacity) {
        if (!isCached_.isModifiable()) {
          isCached_ = makeMutableCopy(isCached_, capacity);
        }
        bitField0_ |= 0x00000020;
      }
      /**
       * <pre>
       * if a location in locs is cached
       * </pre>
       *
       * <code>repeated bool isCached = 6 [packed = true];</code>
       * @return A list containing the isCached.
       */
      public java.util.List<java.lang.Boolean>
          getIsCachedList() {
        isCached_.makeImmutable();
        return isCached_;
      }
      /**
       * <pre>
       * if a location in locs is cached
       * </pre>
       *
       * <code>repeated bool isCached = 6 [packed = true];</code>
       * @return The count of isCached.
       */
      public int getIsCachedCount() {
        return isCached_.size();
      }
      /**
       * <pre>
       * if a location in locs is cached
       * </pre>
       *
       * <code>repeated bool isCached = 6 [packed = true];</code>
       * @param index The index of the element to return.
       * @return The isCached at the given index.
       */
      public boolean getIsCached(int index) {
        return isCached_.getBoolean(index);
      }
      /**
       * <pre>
       * if a location in locs is cached
       * </pre>
       *
       * <code>repeated bool isCached = 6 [packed = true];</code>
       * @param index The index to set the value at.
       * @param value The isCached to set.
       * @return This builder for chaining.
       */
      public Builder setIsCached(
          int index, boolean value) {

        ensureIsCachedIsMutable();
        isCached_.setBoolean(index, value);
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * if a location in locs is cached
       * </pre>
       *
       * <code>repeated bool isCached = 6 [packed = true];</code>
       * @param value The isCached to add.
       * @return This builder for chaining.
       */
      public Builder addIsCached(boolean value) {

        ensureIsCachedIsMutable();
        isCached_.addBoolean(value);
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * if a location in locs is cached
       * </pre>
       *
       * <code>repeated bool isCached = 6 [packed = true];</code>
       * @param values The isCached to add.
       * @return This builder for chaining.
       */
      public Builder addAllIsCached(
          java.lang.Iterable<? extends java.lang.Boolean> values) {
        ensureIsCachedIsMutable();
        org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
            values, isCached_);
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * if a location in locs is cached
       * </pre>
       *
       * <code>repeated bool isCached = 6 [packed = true];</code>
       * @return This builder for chaining.
       */
      public Builder clearIsCached() {
        isCached_ = emptyBooleanList();
        bitField0_ = (bitField0_ & ~0x00000020);
        onChanged();
        return this;
      }

      private java.util.List<java.lang.Integer> storageTypes_ =
        java.util.Collections.emptyList();
      private void ensureStorageTypesIsMutable() {
        if (!((bitField0_ & 0x00000040) != 0)) {
          storageTypes_ = new java.util.ArrayList<java.lang.Integer>(storageTypes_);
          bitField0_ |= 0x00000040;
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
       * @return A list containing the storageTypes.
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList() {
        return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
            java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(storageTypes_, storageTypes_converter_);
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
       * @return The count of storageTypes.
       */
      public int getStorageTypesCount() {
        return storageTypes_.size();
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
       * @param index The index of the element to return.
       * @return The storageTypes at the given index.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) {
        return storageTypes_converter_.convert(storageTypes_.get(index));
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
       * @param index The index to set the value at.
       * @param value The storageTypes to set.
       * @return This builder for chaining.
       */
      public Builder setStorageTypes(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureStorageTypesIsMutable();
        storageTypes_.set(index, value.getNumber());
        onChanged();
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
       * @param value The storageTypes to add.
       * @return This builder for chaining.
       */
      public Builder addStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureStorageTypesIsMutable();
        storageTypes_.add(value.getNumber());
        onChanged();
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
       * @param values The storageTypes to add.
       * @return This builder for chaining.
       */
      public Builder addAllStorageTypes(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> values) {
        ensureStorageTypesIsMutable();
        for (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value : values) {
          storageTypes_.add(value.getNumber());
        }
        onChanged();
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
       * @return This builder for chaining.
       */
      public Builder clearStorageTypes() {
        storageTypes_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000040);
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList storageIDs_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
      private void ensureStorageIDsIsMutable() {
        if (!storageIDs_.isModifiable()) {
          storageIDs_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(storageIDs_);
        }
        bitField0_ |= 0x00000080;
      }
      /**
       * <code>repeated string storageIDs = 8;</code>
       * @return A list containing the storageIDs.
       */
      public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
          getStorageIDsList() {
        storageIDs_.makeImmutable();
        return storageIDs_;
      }
      /**
       * <code>repeated string storageIDs = 8;</code>
       * @return The count of storageIDs.
       */
      public int getStorageIDsCount() {
        return storageIDs_.size();
      }
      /**
       * <code>repeated string storageIDs = 8;</code>
       * @param index The index of the element to return.
       * @return The storageIDs at the given index.
       */
      public java.lang.String getStorageIDs(int index) {
        return storageIDs_.get(index);
      }
      /**
       * <code>repeated string storageIDs = 8;</code>
       * @param index The index of the value to return.
       * @return The bytes of the storageIDs at the given index.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getStorageIDsBytes(int index) {
        return storageIDs_.getByteString(index);
      }
      /**
       * <code>repeated string storageIDs = 8;</code>
       * @param index The index to set the value at.
       * @param value The storageIDs to set.
       * @return This builder for chaining.
       */
      public Builder setStorageIDs(
          int index, java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureStorageIDsIsMutable();
        storageIDs_.set(index, value);
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string storageIDs = 8;</code>
       * @param value The storageIDs to add.
       * @return This builder for chaining.
       */
      public Builder addStorageIDs(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureStorageIDsIsMutable();
        storageIDs_.add(value);
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string storageIDs = 8;</code>
       * @param values The storageIDs to add.
       * @return This builder for chaining.
       */
      public Builder addAllStorageIDs(
          java.lang.Iterable<java.lang.String> values) {
        ensureStorageIDsIsMutable();
        org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
            values, storageIDs_);
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string storageIDs = 8;</code>
       * @return This builder for chaining.
       */
      public Builder clearStorageIDs() {
        storageIDs_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        bitField0_ = (bitField0_ & ~0x00000080);
        onChanged();
        return this;
      }
      /**
       * <code>repeated string storageIDs = 8;</code>
       * @param value The bytes of the storageIDs to add.
       * @return This builder for chaining.
       */
      public Builder addStorageIDsBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        ensureStorageIDsIsMutable();
        storageIDs_.add(value);
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }

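      // Usage sketch (illustrative): isCached (field 6), storageTypes (field 7)
      // and storageIDs (field 8) are conventionally parallel to locs, i.e. the
      // i-th entry of each list describes the i-th datanode location, and keeping
      // them in step is the caller's responsibility. "blk" and "dn" are assumed
      // as in the earlier sketches; the storage ID literal is a placeholder.
      //
      //   blk.addLocs(dn);
      //   blk.addIsCached(true);
      //   blk.addStorageTypes(HdfsProtos.StorageTypeProto.SSD);
      //   blk.addStorageIDs("DS-placeholder-storage-id");
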
      private org.apache.hadoop.thirdparty.protobuf.ByteString blockIndices_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <pre>
       * striped block related fields
       * </pre>
       *
       * <code>optional bytes blockIndices = 9;</code>
       * @return Whether the blockIndices field is set.
       */
      @java.lang.Override
      public boolean hasBlockIndices() {
        return ((bitField0_ & 0x00000100) != 0);
      }
      /**
       * <pre>
       * striped block related fields
       * </pre>
       *
       * <code>optional bytes blockIndices = 9;</code>
       * @return The blockIndices.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockIndices() {
        return blockIndices_;
      }
      /**
       * <pre>
       * striped block related fields
       * </pre>
       *
       * <code>optional bytes blockIndices = 9;</code>
       * @param value The blockIndices to set.
       * @return This builder for chaining.
       */
      public Builder setBlockIndices(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        blockIndices_ = value;
        bitField0_ |= 0x00000100;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * striped block related fields
       * </pre>
       *
       * <code>optional bytes blockIndices = 9;</code>
       * @return This builder for chaining.
       */
      public Builder clearBlockIndices() {
        bitField0_ = (bitField0_ & ~0x00000100);
        blockIndices_ = getDefaultInstance().getBlockIndices();
        onChanged();
        return this;
      }

      private java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> blockTokens_ =
        java.util.Collections.emptyList();
      private void ensureBlockTokensIsMutable() {
        if (!((bitField0_ & 0x00000200) != 0)) {
          blockTokens_ = new java.util.ArrayList<org.apache.hadoop.security.proto.SecurityProtos.TokenProto>(blockTokens_);
          bitField0_ |= 0x00000200;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> blockTokensBuilder_;

      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
       */
      public java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> getBlockTokensList() {
        if (blockTokensBuilder_ == null) {
          return java.util.Collections.unmodifiableList(blockTokens_);
        } else {
          return blockTokensBuilder_.getMessageList();
        }
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
       */
      public int getBlockTokensCount() {
        if (blockTokensBuilder_ == null) {
          return blockTokens_.size();
        } else {
          return blockTokensBuilder_.getCount();
        }
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
       */
      public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockTokens(int index) {
        if (blockTokensBuilder_ == null) {
          return blockTokens_.get(index);
        } else {
          return blockTokensBuilder_.getMessage(index);
        }
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
       */
      public Builder setBlockTokens(
          int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
        if (blockTokensBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlockTokensIsMutable();
          blockTokens_.set(index, value);
          onChanged();
        } else {
          blockTokensBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
       */
      public Builder setBlockTokens(
          int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
        if (blockTokensBuilder_ == null) {
          ensureBlockTokensIsMutable();
          blockTokens_.set(index, builderForValue.build());
          onChanged();
        } else {
          blockTokensBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
       */
      public Builder addBlockTokens(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
        if (blockTokensBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlockTokensIsMutable();
          blockTokens_.add(value);
          onChanged();
        } else {
          blockTokensBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
       */
      public Builder addBlockTokens(
          int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
        if (blockTokensBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlockTokensIsMutable();
          blockTokens_.add(index, value);
          onChanged();
        } else {
          blockTokensBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
       */
      public Builder addBlockTokens(
          org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
        if (blockTokensBuilder_ == null) {
          ensureBlockTokensIsMutable();
          blockTokens_.add(builderForValue.build());
          onChanged();
        } else {
          blockTokensBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
       */
      public Builder addBlockTokens(
          int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
        if (blockTokensBuilder_ == null) {
          ensureBlockTokensIsMutable();
          blockTokens_.add(index, builderForValue.build());
          onChanged();
        } else {
          blockTokensBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
       */
      public Builder addAllBlockTokens(
          java.lang.Iterable<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProto> values) {
        if (blockTokensBuilder_ == null) {
          ensureBlockTokensIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, blockTokens_);
          onChanged();
        } else {
          blockTokensBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
       */
      public Builder clearBlockTokens() {
        if (blockTokensBuilder_ == null) {
          blockTokens_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000200);
          onChanged();
        } else {
          blockTokensBuilder_.clear();
        }
        return this;
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
       */
      public Builder removeBlockTokens(int index) {
        if (blockTokensBuilder_ == null) {
          ensureBlockTokensIsMutable();
          blockTokens_.remove(index);
          onChanged();
        } else {
          blockTokensBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
       */
      public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder getBlockTokensBuilder(
          int index) {
        return getBlockTokensFieldBuilder().getBuilder(index);
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
       */
      public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokensOrBuilder(
          int index) {
        if (blockTokensBuilder_ == null) {
          return blockTokens_.get(index);
        } else {
          return blockTokensBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
       */
      public java.util.List<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> 
           getBlockTokensOrBuilderList() {
        if (blockTokensBuilder_ != null) {
          return blockTokensBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(blockTokens_);
        }
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
       */
      public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder addBlockTokensBuilder() {
        return getBlockTokensFieldBuilder().addBuilder(
            org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance());
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
       */
      public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder addBlockTokensBuilder(
          int index) {
        return getBlockTokensFieldBuilder().addBuilder(
            index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance());
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 10;</code>
       */
      public java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder> 
           getBlockTokensBuilderList() {
        return getBlockTokensFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> 
          getBlockTokensFieldBuilder() {
        if (blockTokensBuilder_ == null) {
          blockTokensBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>(
                  blockTokens_,
                  ((bitField0_ & 0x00000200) != 0),
                  getParentForChildren(),
                  isClean());
          blockTokens_ = null;
        }
        return blockTokensBuilder_;
      }
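      // Usage sketch (illustrative) for the striped (erasure-coded) case: the
      // optional blockIndices bytes and the repeated blockTokens field above are
      // expected to carry one entry per internal block. The one-byte-per-index
      // encoding shown here is an assumption for illustration; "blk" is an
      // assumed LocatedBlockProto.Builder and "t0".."t2" assumed TokenProtos.
      //
      //   blk.setBlockIndices(
      //       org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(new byte[] {0, 1, 2}));
      //   blk.addBlockTokens(t0);
      //   blk.addBlockTokens(t1);
      //   blk.addBlockTokens(t2);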
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.LocatedBlockProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.LocatedBlockProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<LocatedBlockProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<LocatedBlockProto>() {
      @java.lang.Override
      public LocatedBlockProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<LocatedBlockProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<LocatedBlockProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

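  // Usage sketch (illustrative): LocatedBlockProto instances are normally
  // re-materialized from wire bytes through the static parse helpers or the
  // parser() accessor rather than via the deprecated PARSER field. "bytes" is
  // an assumed byte[] previously produced by LocatedBlockProto#toByteArray().
  //
  //   HdfsProtos.LocatedBlockProto lb =
  //       HdfsProtos.LocatedBlockProto.parser().parseFrom(bytes);
  //   boolean allReplicasCorrupt = lb.getCorrupt();
  //   int locationCount = lb.getLocsCount();
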
  public interface BatchedListingKeyProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.BatchedListingKeyProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required bytes checksum = 1;</code>
     * @return Whether the checksum field is set.
     */
    boolean hasChecksum();
    /**
     * <code>required bytes checksum = 1;</code>
     * @return The checksum.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getChecksum();

    /**
     * <code>required uint32 pathIndex = 2;</code>
     * @return Whether the pathIndex field is set.
     */
    boolean hasPathIndex();
    /**
     * <code>required uint32 pathIndex = 2;</code>
     * @return The pathIndex.
     */
    int getPathIndex();

    /**
     * <code>required bytes startAfter = 3;</code>
     * @return Whether the startAfter field is set.
     */
    boolean hasStartAfter();
    /**
     * <code>required bytes startAfter = 3;</code>
     * @return The startAfter.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getStartAfter();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.BatchedListingKeyProto}
   */
  public static final class BatchedListingKeyProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.BatchedListingKeyProto)
      BatchedListingKeyProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use BatchedListingKeyProto.newBuilder() to construct.
    private BatchedListingKeyProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private BatchedListingKeyProto() {
      checksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      startAfter_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new BatchedListingKeyProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedListingKeyProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedListingKeyProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto.Builder.class);
    }

    private int bitField0_;
    public static final int CHECKSUM_FIELD_NUMBER = 1;
    private org.apache.hadoop.thirdparty.protobuf.ByteString checksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>required bytes checksum = 1;</code>
     * @return Whether the checksum field is set.
     */
    @java.lang.Override
    public boolean hasChecksum() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required bytes checksum = 1;</code>
     * @return The checksum.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getChecksum() {
      return checksum_;
    }

    public static final int PATHINDEX_FIELD_NUMBER = 2;
    private int pathIndex_ = 0;
    /**
     * <code>required uint32 pathIndex = 2;</code>
     * @return Whether the pathIndex field is set.
     */
    @java.lang.Override
    public boolean hasPathIndex() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required uint32 pathIndex = 2;</code>
     * @return The pathIndex.
     */
    @java.lang.Override
    public int getPathIndex() {
      return pathIndex_;
    }

    public static final int STARTAFTER_FIELD_NUMBER = 3;
    private org.apache.hadoop.thirdparty.protobuf.ByteString startAfter_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>required bytes startAfter = 3;</code>
     * @return Whether the startAfter field is set.
     */
    @java.lang.Override
    public boolean hasStartAfter() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required bytes startAfter = 3;</code>
     * @return The startAfter.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getStartAfter() {
      return startAfter_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasChecksum()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasPathIndex()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasStartAfter()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeBytes(1, checksum_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt32(2, pathIndex_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeBytes(3, startAfter_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(1, checksum_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(2, pathIndex_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(3, startAfter_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto) obj;

      if (hasChecksum() != other.hasChecksum()) return false;
      if (hasChecksum()) {
        if (!getChecksum()
            .equals(other.getChecksum())) return false;
      }
      if (hasPathIndex() != other.hasPathIndex()) return false;
      if (hasPathIndex()) {
        if (getPathIndex()
            != other.getPathIndex()) return false;
      }
      if (hasStartAfter() != other.hasStartAfter()) return false;
      if (hasStartAfter()) {
        if (!getStartAfter()
            .equals(other.getStartAfter())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasChecksum()) {
        hash = (37 * hash) + CHECKSUM_FIELD_NUMBER;
        hash = (53 * hash) + getChecksum().hashCode();
      }
      if (hasPathIndex()) {
        hash = (37 * hash) + PATHINDEX_FIELD_NUMBER;
        hash = (53 * hash) + getPathIndex();
      }
      if (hasStartAfter()) {
        hash = (37 * hash) + STARTAFTER_FIELD_NUMBER;
        hash = (53 * hash) + getStartAfter().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

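    // Usage sketch (illustrative): all three BatchedListingKeyProto fields are
    // required, so each must be set before build() succeeds. A minimal round
    // trip through bytes, with placeholder values, looks like this:
    //
    //   HdfsProtos.BatchedListingKeyProto key =
    //       HdfsProtos.BatchedListingKeyProto.newBuilder()
    //           .setChecksum(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("cksum"))
    //           .setPathIndex(0)
    //           .setStartAfter(org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY)
    //           .build();
    //   HdfsProtos.BatchedListingKeyProto parsed =
    //       HdfsProtos.BatchedListingKeyProto.parseFrom(key.toByteArray());
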
    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.BatchedListingKeyProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.BatchedListingKeyProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedListingKeyProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedListingKeyProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        checksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        pathIndex_ = 0;
        startAfter_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedListingKeyProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.checksum_ = checksum_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.pathIndex_ = pathIndex_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.startAfter_ = startAfter_;
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto.getDefaultInstance()) return this;
        if (other.hasChecksum()) {
          setChecksum(other.getChecksum());
        }
        if (other.hasPathIndex()) {
          setPathIndex(other.getPathIndex());
        }
        if (other.hasStartAfter()) {
          setStartAfter(other.getStartAfter());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasChecksum()) {
          return false;
        }
        if (!hasPathIndex()) {
          return false;
        }
        if (!hasStartAfter()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
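            // Each case value below is the protobuf wire tag, (field number << 3) | wire type:
            // field 1 (checksum, length-delimited bytes) arrives as tag 10, field 2
            // (pathIndex, varint uint32) as tag 16, field 3 (startAfter, bytes) as tag 26,
            // and tag 0 signals end of input.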
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                checksum_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                pathIndex_ = input.readUInt32();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 26: {
                startAfter_ = input.readBytes();
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.thirdparty.protobuf.ByteString checksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>required bytes checksum = 1;</code>
       * @return Whether the checksum field is set.
       */
      @java.lang.Override
      public boolean hasChecksum() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required bytes checksum = 1;</code>
       * @return The checksum.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getChecksum() {
        return checksum_;
      }
      /**
       * <code>required bytes checksum = 1;</code>
       * @param value The checksum to set.
       * @return This builder for chaining.
       */
      public Builder setChecksum(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        checksum_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required bytes checksum = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearChecksum() {
        bitField0_ = (bitField0_ & ~0x00000001);
        checksum_ = getDefaultInstance().getChecksum();
        onChanged();
        return this;
      }

      private int pathIndex_ ;
      /**
       * <code>required uint32 pathIndex = 2;</code>
       * @return Whether the pathIndex field is set.
       */
      @java.lang.Override
      public boolean hasPathIndex() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required uint32 pathIndex = 2;</code>
       * @return The pathIndex.
       */
      @java.lang.Override
      public int getPathIndex() {
        return pathIndex_;
      }
      /**
       * <code>required uint32 pathIndex = 2;</code>
       * @param value The pathIndex to set.
       * @return This builder for chaining.
       */
      public Builder setPathIndex(int value) {

        pathIndex_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 pathIndex = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearPathIndex() {
        bitField0_ = (bitField0_ & ~0x00000002);
        pathIndex_ = 0;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString startAfter_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>required bytes startAfter = 3;</code>
       * @return Whether the startAfter field is set.
       */
      @java.lang.Override
      public boolean hasStartAfter() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required bytes startAfter = 3;</code>
       * @return The startAfter.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getStartAfter() {
        return startAfter_;
      }
      /**
       * <code>required bytes startAfter = 3;</code>
       * @param value The startAfter to set.
       * @return This builder for chaining.
       */
      public Builder setStartAfter(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        startAfter_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required bytes startAfter = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearStartAfter() {
        bitField0_ = (bitField0_ & ~0x00000004);
        startAfter_ = getDefaultInstance().getStartAfter();
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BatchedListingKeyProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.BatchedListingKeyProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<BatchedListingKeyProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<BatchedListingKeyProto>() {
      @java.lang.Override
      public BatchedListingKeyProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<BatchedListingKeyProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<BatchedListingKeyProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedListingKeyProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
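
  // Illustrative usage sketch (not part of the generated API; the field values below are
  // placeholders): BatchedListingKeyProto is assembled through its Builder, and because
  // checksum, pathIndex and startAfter are all declared "required", build() will throw
  // if any of them is left unset.
  //
  //   HdfsProtos.BatchedListingKeyProto key = HdfsProtos.BatchedListingKeyProto.newBuilder()
  //       .setChecksum(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("checksum"))
  //       .setPathIndex(0)
  //       .setStartAfter(org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY)
  //       .build();
  //   byte[] bytes = key.toByteArray();
  //   HdfsProtos.BatchedListingKeyProto parsed =
  //       HdfsProtos.BatchedListingKeyProto.parseFrom(bytes);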

  public interface DataEncryptionKeyProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DataEncryptionKeyProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required uint32 keyId = 1;</code>
     * @return Whether the keyId field is set.
     */
    boolean hasKeyId();
    /**
     * <code>required uint32 keyId = 1;</code>
     * @return The keyId.
     */
    int getKeyId();

    /**
     * <code>required string blockPoolId = 2;</code>
     * @return Whether the blockPoolId field is set.
     */
    boolean hasBlockPoolId();
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The blockPoolId.
     */
    java.lang.String getBlockPoolId();
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The bytes for blockPoolId.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getBlockPoolIdBytes();

    /**
     * <code>required bytes nonce = 3;</code>
     * @return Whether the nonce field is set.
     */
    boolean hasNonce();
    /**
     * <code>required bytes nonce = 3;</code>
     * @return The nonce.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getNonce();

    /**
     * <code>required bytes encryptionKey = 4;</code>
     * @return Whether the encryptionKey field is set.
     */
    boolean hasEncryptionKey();
    /**
     * <code>required bytes encryptionKey = 4;</code>
     * @return The encryptionKey.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getEncryptionKey();

    /**
     * <code>required uint64 expiryDate = 5;</code>
     * @return Whether the expiryDate field is set.
     */
    boolean hasExpiryDate();
    /**
     * <code>required uint64 expiryDate = 5;</code>
     * @return The expiryDate.
     */
    long getExpiryDate();

    /**
     * <code>optional string encryptionAlgorithm = 6;</code>
     * @return Whether the encryptionAlgorithm field is set.
     */
    boolean hasEncryptionAlgorithm();
    /**
     * <code>optional string encryptionAlgorithm = 6;</code>
     * @return The encryptionAlgorithm.
     */
    java.lang.String getEncryptionAlgorithm();
    /**
     * <code>optional string encryptionAlgorithm = 6;</code>
     * @return The bytes for encryptionAlgorithm.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getEncryptionAlgorithmBytes();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.DataEncryptionKeyProto}
   */
  public static final class DataEncryptionKeyProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.DataEncryptionKeyProto)
      DataEncryptionKeyProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use DataEncryptionKeyProto.newBuilder() to construct.
    private DataEncryptionKeyProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private DataEncryptionKeyProto() {
      blockPoolId_ = "";
      nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      encryptionKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      encryptionAlgorithm_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new DataEncryptionKeyProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.Builder.class);
    }

    private int bitField0_;
    public static final int KEYID_FIELD_NUMBER = 1;
    private int keyId_ = 0;
    /**
     * <code>required uint32 keyId = 1;</code>
     * @return Whether the keyId field is set.
     */
    @java.lang.Override
    public boolean hasKeyId() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required uint32 keyId = 1;</code>
     * @return The keyId.
     */
    @java.lang.Override
    public int getKeyId() {
      return keyId_;
    }

    public static final int BLOCKPOOLID_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object blockPoolId_ = "";
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return Whether the blockPoolId field is set.
     */
    @java.lang.Override
    public boolean hasBlockPoolId() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The blockPoolId.
     */
    @java.lang.Override
    public java.lang.String getBlockPoolId() {
      java.lang.Object ref = blockPoolId_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          blockPoolId_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string blockPoolId = 2;</code>
     * @return The bytes for blockPoolId.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getBlockPoolIdBytes() {
      java.lang.Object ref = blockPoolId_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        blockPoolId_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int NONCE_FIELD_NUMBER = 3;
    private org.apache.hadoop.thirdparty.protobuf.ByteString nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>required bytes nonce = 3;</code>
     * @return Whether the nonce field is set.
     */
    @java.lang.Override
    public boolean hasNonce() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required bytes nonce = 3;</code>
     * @return The nonce.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getNonce() {
      return nonce_;
    }

    public static final int ENCRYPTIONKEY_FIELD_NUMBER = 4;
    private org.apache.hadoop.thirdparty.protobuf.ByteString encryptionKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>required bytes encryptionKey = 4;</code>
     * @return Whether the encryptionKey field is set.
     */
    @java.lang.Override
    public boolean hasEncryptionKey() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>required bytes encryptionKey = 4;</code>
     * @return The encryptionKey.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getEncryptionKey() {
      return encryptionKey_;
    }

    public static final int EXPIRYDATE_FIELD_NUMBER = 5;
    private long expiryDate_ = 0L;
    /**
     * <code>required uint64 expiryDate = 5;</code>
     * @return Whether the expiryDate field is set.
     */
    @java.lang.Override
    public boolean hasExpiryDate() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>required uint64 expiryDate = 5;</code>
     * @return The expiryDate.
     */
    @java.lang.Override
    public long getExpiryDate() {
      return expiryDate_;
    }

    public static final int ENCRYPTIONALGORITHM_FIELD_NUMBER = 6;
    @SuppressWarnings("serial")
    private volatile java.lang.Object encryptionAlgorithm_ = "";
    /**
     * <code>optional string encryptionAlgorithm = 6;</code>
     * @return Whether the encryptionAlgorithm field is set.
     */
    @java.lang.Override
    public boolean hasEncryptionAlgorithm() {
      return ((bitField0_ & 0x00000020) != 0);
    }
    /**
     * <code>optional string encryptionAlgorithm = 6;</code>
     * @return The encryptionAlgorithm.
     */
    @java.lang.Override
    public java.lang.String getEncryptionAlgorithm() {
      java.lang.Object ref = encryptionAlgorithm_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          encryptionAlgorithm_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string encryptionAlgorithm = 6;</code>
     * @return The bytes for encryptionAlgorithm.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getEncryptionAlgorithmBytes() {
      java.lang.Object ref = encryptionAlgorithm_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        encryptionAlgorithm_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasKeyId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasBlockPoolId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasNonce()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasEncryptionKey()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasExpiryDate()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt32(1, keyId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, blockPoolId_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeBytes(3, nonce_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeBytes(4, encryptionKey_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeUInt64(5, expiryDate_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 6, encryptionAlgorithm_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(1, keyId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, blockPoolId_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(3, nonce_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(4, encryptionKey_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(5, expiryDate_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(6, encryptionAlgorithm_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto) obj;

      if (hasKeyId() != other.hasKeyId()) return false;
      if (hasKeyId()) {
        if (getKeyId()
            != other.getKeyId()) return false;
      }
      if (hasBlockPoolId() != other.hasBlockPoolId()) return false;
      if (hasBlockPoolId()) {
        if (!getBlockPoolId()
            .equals(other.getBlockPoolId())) return false;
      }
      if (hasNonce() != other.hasNonce()) return false;
      if (hasNonce()) {
        if (!getNonce()
            .equals(other.getNonce())) return false;
      }
      if (hasEncryptionKey() != other.hasEncryptionKey()) return false;
      if (hasEncryptionKey()) {
        if (!getEncryptionKey()
            .equals(other.getEncryptionKey())) return false;
      }
      if (hasExpiryDate() != other.hasExpiryDate()) return false;
      if (hasExpiryDate()) {
        if (getExpiryDate()
            != other.getExpiryDate()) return false;
      }
      if (hasEncryptionAlgorithm() != other.hasEncryptionAlgorithm()) return false;
      if (hasEncryptionAlgorithm()) {
        if (!getEncryptionAlgorithm()
            .equals(other.getEncryptionAlgorithm())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasKeyId()) {
        hash = (37 * hash) + KEYID_FIELD_NUMBER;
        hash = (53 * hash) + getKeyId();
      }
      if (hasBlockPoolId()) {
        hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
        hash = (53 * hash) + getBlockPoolId().hashCode();
      }
      if (hasNonce()) {
        hash = (37 * hash) + NONCE_FIELD_NUMBER;
        hash = (53 * hash) + getNonce().hashCode();
      }
      if (hasEncryptionKey()) {
        hash = (37 * hash) + ENCRYPTIONKEY_FIELD_NUMBER;
        hash = (53 * hash) + getEncryptionKey().hashCode();
      }
      if (hasExpiryDate()) {
        hash = (37 * hash) + EXPIRYDATE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getExpiryDate());
      }
      if (hasEncryptionAlgorithm()) {
        hash = (37 * hash) + ENCRYPTIONALGORITHM_FIELD_NUMBER;
        hash = (53 * hash) + getEncryptionAlgorithm().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.DataEncryptionKeyProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DataEncryptionKeyProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        keyId_ = 0;
        blockPoolId_ = "";
        nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        encryptionKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        expiryDate_ = 0L;
        encryptionAlgorithm_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.keyId_ = keyId_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.blockPoolId_ = blockPoolId_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.nonce_ = nonce_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.encryptionKey_ = encryptionKey_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.expiryDate_ = expiryDate_;
          to_bitField0_ |= 0x00000010;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.encryptionAlgorithm_ = encryptionAlgorithm_;
          to_bitField0_ |= 0x00000020;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto.getDefaultInstance()) return this;
        if (other.hasKeyId()) {
          setKeyId(other.getKeyId());
        }
        if (other.hasBlockPoolId()) {
          blockPoolId_ = other.blockPoolId_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        if (other.hasNonce()) {
          setNonce(other.getNonce());
        }
        if (other.hasEncryptionKey()) {
          setEncryptionKey(other.getEncryptionKey());
        }
        if (other.hasExpiryDate()) {
          setExpiryDate(other.getExpiryDate());
        }
        if (other.hasEncryptionAlgorithm()) {
          encryptionAlgorithm_ = other.encryptionAlgorithm_;
          bitField0_ |= 0x00000020;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasKeyId()) {
          return false;
        }
        if (!hasBlockPoolId()) {
          return false;
        }
        if (!hasNonce()) {
          return false;
        }
        if (!hasEncryptionKey()) {
          return false;
        }
        if (!hasExpiryDate()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                keyId_ = input.readUInt32();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 18: {
                blockPoolId_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 26: {
                nonce_ = input.readBytes();
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              case 34: {
                encryptionKey_ = input.readBytes();
                bitField0_ |= 0x00000008;
                break;
              } // case 34
              case 40: {
                expiryDate_ = input.readUInt64();
                bitField0_ |= 0x00000010;
                break;
              } // case 40
              case 50: {
                encryptionAlgorithm_ = input.readBytes();
                bitField0_ |= 0x00000020;
                break;
              } // case 50
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int keyId_ ;
      /**
       * <code>required uint32 keyId = 1;</code>
       * @return Whether the keyId field is set.
       */
      @java.lang.Override
      public boolean hasKeyId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required uint32 keyId = 1;</code>
       * @return The keyId.
       */
      @java.lang.Override
      public int getKeyId() {
        return keyId_;
      }
      /**
       * <code>required uint32 keyId = 1;</code>
       * @param value The keyId to set.
       * @return This builder for chaining.
       */
      public Builder setKeyId(int value) {

        keyId_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 keyId = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearKeyId() {
        bitField0_ = (bitField0_ & ~0x00000001);
        keyId_ = 0;
        onChanged();
        return this;
      }

      private java.lang.Object blockPoolId_ = "";
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return Whether the blockPoolId field is set.
       */
      public boolean hasBlockPoolId() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return The blockPoolId.
       */
      public java.lang.String getBlockPoolId() {
        java.lang.Object ref = blockPoolId_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            blockPoolId_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return The bytes for blockPoolId.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getBlockPoolIdBytes() {
        java.lang.Object ref = blockPoolId_;
        if (ref instanceof java.lang.String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          blockPoolId_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @param value The blockPoolId to set.
       * @return This builder for chaining.
       */
      public Builder setBlockPoolId(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        blockPoolId_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearBlockPoolId() {
        blockPoolId_ = getDefaultInstance().getBlockPoolId();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <code>required string blockPoolId = 2;</code>
       * @param value The bytes for blockPoolId to set.
       * @return This builder for chaining.
       */
      public Builder setBlockPoolIdBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        blockPoolId_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString nonce_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>required bytes nonce = 3;</code>
       * @return Whether the nonce field is set.
       */
      @java.lang.Override
      public boolean hasNonce() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required bytes nonce = 3;</code>
       * @return The nonce.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getNonce() {
        return nonce_;
      }
      /**
       * <code>required bytes nonce = 3;</code>
       * @param value The nonce to set.
       * @return This builder for chaining.
       */
      public Builder setNonce(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        nonce_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required bytes nonce = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearNonce() {
        bitField0_ = (bitField0_ & ~0x00000004);
        nonce_ = getDefaultInstance().getNonce();
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString encryptionKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>required bytes encryptionKey = 4;</code>
       * @return Whether the encryptionKey field is set.
       */
      @java.lang.Override
      public boolean hasEncryptionKey() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>required bytes encryptionKey = 4;</code>
       * @return The encryptionKey.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getEncryptionKey() {
        return encryptionKey_;
      }
      /**
       * <code>required bytes encryptionKey = 4;</code>
       * @param value The encryptionKey to set.
       * @return This builder for chaining.
       */
      public Builder setEncryptionKey(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        encryptionKey_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>required bytes encryptionKey = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearEncryptionKey() {
        bitField0_ = (bitField0_ & ~0x00000008);
        encryptionKey_ = getDefaultInstance().getEncryptionKey();
        onChanged();
        return this;
      }

      private long expiryDate_ ;
      /**
       * <code>required uint64 expiryDate = 5;</code>
       * @return Whether the expiryDate field is set.
       */
      @java.lang.Override
      public boolean hasExpiryDate() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>required uint64 expiryDate = 5;</code>
       * @return The expiryDate.
       */
      @java.lang.Override
      public long getExpiryDate() {
        return expiryDate_;
      }
      /**
       * <code>required uint64 expiryDate = 5;</code>
       * @param value The expiryDate to set.
       * @return This builder for chaining.
       */
      public Builder setExpiryDate(long value) {

        expiryDate_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 expiryDate = 5;</code>
       * @return This builder for chaining.
       */
      public Builder clearExpiryDate() {
        bitField0_ = (bitField0_ & ~0x00000010);
        expiryDate_ = 0L;
        onChanged();
        return this;
      }

      private java.lang.Object encryptionAlgorithm_ = "";
      /**
       * <code>optional string encryptionAlgorithm = 6;</code>
       * @return Whether the encryptionAlgorithm field is set.
       */
      public boolean hasEncryptionAlgorithm() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>optional string encryptionAlgorithm = 6;</code>
       * @return The encryptionAlgorithm.
       */
      public java.lang.String getEncryptionAlgorithm() {
        java.lang.Object ref = encryptionAlgorithm_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            encryptionAlgorithm_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string encryptionAlgorithm = 6;</code>
       * @return The bytes for encryptionAlgorithm.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getEncryptionAlgorithmBytes() {
        java.lang.Object ref = encryptionAlgorithm_;
        if (ref instanceof java.lang.String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          encryptionAlgorithm_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string encryptionAlgorithm = 6;</code>
       * @param value The encryptionAlgorithm to set.
       * @return This builder for chaining.
       */
      public Builder setEncryptionAlgorithm(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        encryptionAlgorithm_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <code>optional string encryptionAlgorithm = 6;</code>
       * @return This builder for chaining.
       */
      public Builder clearEncryptionAlgorithm() {
        encryptionAlgorithm_ = getDefaultInstance().getEncryptionAlgorithm();
        bitField0_ = (bitField0_ & ~0x00000020);
        onChanged();
        return this;
      }
      /**
       * <code>optional string encryptionAlgorithm = 6;</code>
       * @param value The bytes for encryptionAlgorithm to set.
       * @return This builder for chaining.
       */
      public Builder setEncryptionAlgorithmBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        encryptionAlgorithm_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DataEncryptionKeyProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.DataEncryptionKeyProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DataEncryptionKeyProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DataEncryptionKeyProto>() {
      @java.lang.Override
      public DataEncryptionKeyProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<DataEncryptionKeyProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<DataEncryptionKeyProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
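
  // Illustrative usage sketch (placeholder values only): DataEncryptionKeyProto requires
  // keyId, blockPoolId, nonce, encryptionKey and expiryDate; encryptionAlgorithm is
  // optional and may be omitted.
  //
  //   HdfsProtos.DataEncryptionKeyProto dek = HdfsProtos.DataEncryptionKeyProto.newBuilder()
  //       .setKeyId(1)
  //       .setBlockPoolId("example-block-pool-id")
  //       .setNonce(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(new byte[8]))
  //       .setEncryptionKey(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(new byte[16]))
  //       .setExpiryDate(System.currentTimeMillis())
  //       .build();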

  public interface FileEncryptionInfoProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.FileEncryptionInfoProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
     * @return Whether the suite field is set.
     */
    boolean hasSuite();
    /**
     * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
     * @return The suite.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite();

    /**
     * <code>required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;</code>
     * @return Whether the cryptoProtocolVersion field is set.
     */
    boolean hasCryptoProtocolVersion();
    /**
     * <code>required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;</code>
     * @return The cryptoProtocolVersion.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion();

    /**
     * <code>required bytes key = 3;</code>
     * @return Whether the key field is set.
     */
    boolean hasKey();
    /**
     * <code>required bytes key = 3;</code>
     * @return The key.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getKey();

    /**
     * <code>required bytes iv = 4;</code>
     * @return Whether the iv field is set.
     */
    boolean hasIv();
    /**
     * <code>required bytes iv = 4;</code>
     * @return The iv.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getIv();

    /**
     * <code>required string keyName = 5;</code>
     * @return Whether the keyName field is set.
     */
    boolean hasKeyName();
    /**
     * <code>required string keyName = 5;</code>
     * @return The keyName.
     */
    java.lang.String getKeyName();
    /**
     * <code>required string keyName = 5;</code>
     * @return The bytes for keyName.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getKeyNameBytes();

    /**
     * <code>required string ezKeyVersionName = 6;</code>
     * @return Whether the ezKeyVersionName field is set.
     */
    boolean hasEzKeyVersionName();
    /**
     * <code>required string ezKeyVersionName = 6;</code>
     * @return The ezKeyVersionName.
     */
    java.lang.String getEzKeyVersionName();
    /**
     * <code>required string ezKeyVersionName = 6;</code>
     * @return The bytes for ezKeyVersionName.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getEzKeyVersionNameBytes();
  }
  /**
   * <pre>
   **
   * Encryption information for a file.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.FileEncryptionInfoProto}
   */
  public static final class FileEncryptionInfoProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.FileEncryptionInfoProto)
      FileEncryptionInfoProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use FileEncryptionInfoProto.newBuilder() to construct.
    private FileEncryptionInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private FileEncryptionInfoProto() {
      suite_ = 1;
      cryptoProtocolVersion_ = 1;
      key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      keyName_ = "";
      ezKeyVersionName_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new FileEncryptionInfoProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder.class);
    }

    private int bitField0_;
    public static final int SUITE_FIELD_NUMBER = 1;
    private int suite_ = 1;
    /**
     * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
     * @return Whether the suite field is set.
     */
    @java.lang.Override public boolean hasSuite() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
     * @return The suite.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.forNumber(suite_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN : result;
    }

    public static final int CRYPTOPROTOCOLVERSION_FIELD_NUMBER = 2;
    private int cryptoProtocolVersion_ = 1;
    /**
     * <code>required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;</code>
     * @return Whether the cryptoProtocolVersion field is set.
     */
    @java.lang.Override public boolean hasCryptoProtocolVersion() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;</code>
     * @return The cryptoProtocolVersion.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.forNumber(cryptoProtocolVersion_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION : result;
    }

    public static final int KEY_FIELD_NUMBER = 3;
    private org.apache.hadoop.thirdparty.protobuf.ByteString key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>required bytes key = 3;</code>
     * @return Whether the key field is set.
     */
    @java.lang.Override
    public boolean hasKey() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required bytes key = 3;</code>
     * @return The key.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getKey() {
      return key_;
    }

    public static final int IV_FIELD_NUMBER = 4;
    private org.apache.hadoop.thirdparty.protobuf.ByteString iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>required bytes iv = 4;</code>
     * @return Whether the iv field is set.
     */
    @java.lang.Override
    public boolean hasIv() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>required bytes iv = 4;</code>
     * @return The iv.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getIv() {
      return iv_;
    }

    public static final int KEYNAME_FIELD_NUMBER = 5;
    @SuppressWarnings("serial")
    private volatile java.lang.Object keyName_ = "";
    /**
     * <code>required string keyName = 5;</code>
     * @return Whether the keyName field is set.
     */
    @java.lang.Override
    public boolean hasKeyName() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>required string keyName = 5;</code>
     * @return The keyName.
     */
    @java.lang.Override
    public java.lang.String getKeyName() {
      java.lang.Object ref = keyName_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          keyName_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string keyName = 5;</code>
     * @return The bytes for keyName.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getKeyNameBytes() {
      java.lang.Object ref = keyName_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        keyName_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int EZKEYVERSIONNAME_FIELD_NUMBER = 6;
    @SuppressWarnings("serial")
    private volatile java.lang.Object ezKeyVersionName_ = "";
    /**
     * <code>required string ezKeyVersionName = 6;</code>
     * @return Whether the ezKeyVersionName field is set.
     */
    @java.lang.Override
    public boolean hasEzKeyVersionName() {
      return ((bitField0_ & 0x00000020) != 0);
    }
    /**
     * <code>required string ezKeyVersionName = 6;</code>
     * @return The ezKeyVersionName.
     */
    @java.lang.Override
    public java.lang.String getEzKeyVersionName() {
      java.lang.Object ref = ezKeyVersionName_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          ezKeyVersionName_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string ezKeyVersionName = 6;</code>
     * @return The bytes for ezKeyVersionName.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getEzKeyVersionNameBytes() {
      java.lang.Object ref = ezKeyVersionName_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        ezKeyVersionName_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasSuite()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasCryptoProtocolVersion()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasKey()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasIv()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasKeyName()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasEzKeyVersionName()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeEnum(1, suite_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeEnum(2, cryptoProtocolVersion_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeBytes(3, key_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeBytes(4, iv_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 5, keyName_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 6, ezKeyVersionName_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(1, suite_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(2, cryptoProtocolVersion_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(3, key_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(4, iv_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(5, keyName_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(6, ezKeyVersionName_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto) obj;

      if (hasSuite() != other.hasSuite()) return false;
      if (hasSuite()) {
        if (suite_ != other.suite_) return false;
      }
      if (hasCryptoProtocolVersion() != other.hasCryptoProtocolVersion()) return false;
      if (hasCryptoProtocolVersion()) {
        if (cryptoProtocolVersion_ != other.cryptoProtocolVersion_) return false;
      }
      if (hasKey() != other.hasKey()) return false;
      if (hasKey()) {
        if (!getKey()
            .equals(other.getKey())) return false;
      }
      if (hasIv() != other.hasIv()) return false;
      if (hasIv()) {
        if (!getIv()
            .equals(other.getIv())) return false;
      }
      if (hasKeyName() != other.hasKeyName()) return false;
      if (hasKeyName()) {
        if (!getKeyName()
            .equals(other.getKeyName())) return false;
      }
      if (hasEzKeyVersionName() != other.hasEzKeyVersionName()) return false;
      if (hasEzKeyVersionName()) {
        if (!getEzKeyVersionName()
            .equals(other.getEzKeyVersionName())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasSuite()) {
        hash = (37 * hash) + SUITE_FIELD_NUMBER;
        hash = (53 * hash) + suite_;
      }
      if (hasCryptoProtocolVersion()) {
        hash = (37 * hash) + CRYPTOPROTOCOLVERSION_FIELD_NUMBER;
        hash = (53 * hash) + cryptoProtocolVersion_;
      }
      if (hasKey()) {
        hash = (37 * hash) + KEY_FIELD_NUMBER;
        hash = (53 * hash) + getKey().hashCode();
      }
      if (hasIv()) {
        hash = (37 * hash) + IV_FIELD_NUMBER;
        hash = (53 * hash) + getIv().hashCode();
      }
      if (hasKeyName()) {
        hash = (37 * hash) + KEYNAME_FIELD_NUMBER;
        hash = (53 * hash) + getKeyName().hashCode();
      }
      if (hasEzKeyVersionName()) {
        hash = (37 * hash) + EZKEYVERSIONNAME_FIELD_NUMBER;
        hash = (53 * hash) + getEzKeyVersionName().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Encryption information for a file.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.FileEncryptionInfoProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.FileEncryptionInfoProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        suite_ = 1;
        cryptoProtocolVersion_ = 1;
        key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        keyName_ = "";
        ezKeyVersionName_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.suite_ = suite_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.cryptoProtocolVersion_ = cryptoProtocolVersion_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.key_ = key_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.iv_ = iv_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.keyName_ = keyName_;
          to_bitField0_ |= 0x00000010;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.ezKeyVersionName_ = ezKeyVersionName_;
          to_bitField0_ |= 0x00000020;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance()) return this;
        if (other.hasSuite()) {
          setSuite(other.getSuite());
        }
        if (other.hasCryptoProtocolVersion()) {
          setCryptoProtocolVersion(other.getCryptoProtocolVersion());
        }
        if (other.hasKey()) {
          setKey(other.getKey());
        }
        if (other.hasIv()) {
          setIv(other.getIv());
        }
        if (other.hasKeyName()) {
          keyName_ = other.keyName_;
          bitField0_ |= 0x00000010;
          onChanged();
        }
        if (other.hasEzKeyVersionName()) {
          ezKeyVersionName_ = other.ezKeyVersionName_;
          bitField0_ |= 0x00000020;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasSuite()) {
          return false;
        }
        if (!hasCryptoProtocolVersion()) {
          return false;
        }
        if (!hasKey()) {
          return false;
        }
        if (!hasIv()) {
          return false;
        }
        if (!hasKeyName()) {
          return false;
        }
        if (!hasEzKeyVersionName()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(1, tmpRaw);
                } else {
                  suite_ = tmpRaw;
                  bitField0_ |= 0x00000001;
                }
                break;
              } // case 8
              case 16: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(2, tmpRaw);
                } else {
                  cryptoProtocolVersion_ = tmpRaw;
                  bitField0_ |= 0x00000002;
                }
                break;
              } // case 16
              case 26: {
                key_ = input.readBytes();
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              case 34: {
                iv_ = input.readBytes();
                bitField0_ |= 0x00000008;
                break;
              } // case 34
              case 42: {
                keyName_ = input.readBytes();
                bitField0_ |= 0x00000010;
                break;
              } // case 42
              case 50: {
                ezKeyVersionName_ = input.readBytes();
                bitField0_ |= 0x00000020;
                break;
              } // case 50
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int suite_ = 1;
      /**
       * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
       * @return Whether the suite field is set.
       */
      @java.lang.Override public boolean hasSuite() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
       * @return The suite.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.forNumber(suite_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN : result;
      }
      /**
       * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
       * @param value The suite to set.
       * @return This builder for chaining.
       */
      public Builder setSuite(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        suite_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearSuite() {
        bitField0_ = (bitField0_ & ~0x00000001);
        suite_ = 1;
        onChanged();
        return this;
      }

      private int cryptoProtocolVersion_ = 1;
      /**
       * <code>required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;</code>
       * @return Whether the cryptoProtocolVersion field is set.
       */
      @java.lang.Override public boolean hasCryptoProtocolVersion() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;</code>
       * @return The cryptoProtocolVersion.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.forNumber(cryptoProtocolVersion_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION : result;
      }
      /**
       * <code>required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;</code>
       * @param value The cryptoProtocolVersion to set.
       * @return This builder for chaining.
       */
      public Builder setCryptoProtocolVersion(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000002;
        cryptoProtocolVersion_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearCryptoProtocolVersion() {
        bitField0_ = (bitField0_ & ~0x00000002);
        cryptoProtocolVersion_ = 1;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>required bytes key = 3;</code>
       * @return Whether the key field is set.
       */
      @java.lang.Override
      public boolean hasKey() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required bytes key = 3;</code>
       * @return The key.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getKey() {
        return key_;
      }
      /**
       * <code>required bytes key = 3;</code>
       * @param value The key to set.
       * @return This builder for chaining.
       */
      public Builder setKey(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        key_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required bytes key = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearKey() {
        bitField0_ = (bitField0_ & ~0x00000004);
        key_ = getDefaultInstance().getKey();
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>required bytes iv = 4;</code>
       * @return Whether the iv field is set.
       */
      @java.lang.Override
      public boolean hasIv() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>required bytes iv = 4;</code>
       * @return The iv.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getIv() {
        return iv_;
      }
      /**
       * <code>required bytes iv = 4;</code>
       * @param value The iv to set.
       * @return This builder for chaining.
       */
      public Builder setIv(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        iv_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>required bytes iv = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearIv() {
        bitField0_ = (bitField0_ & ~0x00000008);
        iv_ = getDefaultInstance().getIv();
        onChanged();
        return this;
      }

      private java.lang.Object keyName_ = "";
      /**
       * <code>required string keyName = 5;</code>
       * @return Whether the keyName field is set.
       */
      public boolean hasKeyName() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>required string keyName = 5;</code>
       * @return The keyName.
       */
      public java.lang.String getKeyName() {
        java.lang.Object ref = keyName_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            keyName_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string keyName = 5;</code>
       * @return The bytes for keyName.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getKeyNameBytes() {
        java.lang.Object ref = keyName_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          keyName_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string keyName = 5;</code>
       * @param value The keyName to set.
       * @return This builder for chaining.
       */
      public Builder setKeyName(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        keyName_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>required string keyName = 5;</code>
       * @return This builder for chaining.
       */
      public Builder clearKeyName() {
        keyName_ = getDefaultInstance().getKeyName();
        bitField0_ = (bitField0_ & ~0x00000010);
        onChanged();
        return this;
      }
      /**
       * <code>required string keyName = 5;</code>
       * @param value The bytes for keyName to set.
       * @return This builder for chaining.
       */
      public Builder setKeyNameBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        keyName_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }

      private java.lang.Object ezKeyVersionName_ = "";
      /**
       * <code>required string ezKeyVersionName = 6;</code>
       * @return Whether the ezKeyVersionName field is set.
       */
      public boolean hasEzKeyVersionName() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>required string ezKeyVersionName = 6;</code>
       * @return The ezKeyVersionName.
       */
      public java.lang.String getEzKeyVersionName() {
        java.lang.Object ref = ezKeyVersionName_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            ezKeyVersionName_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string ezKeyVersionName = 6;</code>
       * @return The bytes for ezKeyVersionName.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getEzKeyVersionNameBytes() {
        java.lang.Object ref = ezKeyVersionName_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          ezKeyVersionName_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string ezKeyVersionName = 6;</code>
       * @param value The ezKeyVersionName to set.
       * @return This builder for chaining.
       */
      public Builder setEzKeyVersionName(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ezKeyVersionName_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <code>required string ezKeyVersionName = 6;</code>
       * @return This builder for chaining.
       */
      public Builder clearEzKeyVersionName() {
        ezKeyVersionName_ = getDefaultInstance().getEzKeyVersionName();
        bitField0_ = (bitField0_ & ~0x00000020);
        onChanged();
        return this;
      }
      /**
       * <code>required string ezKeyVersionName = 6;</code>
       * @param value The bytes for ezKeyVersionName to set.
       * @return This builder for chaining.
       */
      public Builder setEzKeyVersionNameBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        ezKeyVersionName_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FileEncryptionInfoProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.FileEncryptionInfoProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<FileEncryptionInfoProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<FileEncryptionInfoProto>() {
      @java.lang.Override
      public FileEncryptionInfoProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<FileEncryptionInfoProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<FileEncryptionInfoProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
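
  // Illustrative sketch (not part of the generated protobuf API): constructing a
  // FileEncryptionInfoProto through its Builder. Every field in this message is
  // declared "required", so build() throws UninitializedMessageException if any
  // of them is left unset. The values below are placeholders rather than real
  // key material, and the enum constants shown are simply the ones referenced in
  // this file; in practice a concrete cipher suite and crypto protocol version
  // would be chosen.
  //
  //   HdfsProtos.FileEncryptionInfoProto info =
  //       HdfsProtos.FileEncryptionInfoProto.newBuilder()
  //           .setSuite(HdfsProtos.CipherSuiteProto.UNKNOWN)
  //           .setCryptoProtocolVersion(
  //               HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION)
  //           .setKey(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("key-bytes"))
  //           .setIv(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("iv-bytes"))
  //           .setKeyName("myKey")
  //           .setEzKeyVersionName("myKey@0")
  //           .build();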

  public interface PerFileEncryptionInfoProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.PerFileEncryptionInfoProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required bytes key = 1;</code>
     * @return Whether the key field is set.
     */
    boolean hasKey();
    /**
     * <code>required bytes key = 1;</code>
     * @return The key.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getKey();

    /**
     * <code>required bytes iv = 2;</code>
     * @return Whether the iv field is set.
     */
    boolean hasIv();
    /**
     * <code>required bytes iv = 2;</code>
     * @return The iv.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getIv();

    /**
     * <code>required string ezKeyVersionName = 3;</code>
     * @return Whether the ezKeyVersionName field is set.
     */
    boolean hasEzKeyVersionName();
    /**
     * <code>required string ezKeyVersionName = 3;</code>
     * @return The ezKeyVersionName.
     */
    java.lang.String getEzKeyVersionName();
    /**
     * <code>required string ezKeyVersionName = 3;</code>
     * @return The bytes for ezKeyVersionName.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getEzKeyVersionNameBytes();
  }
  /**
   * <pre>
   **
   * Encryption information for an individual
   * file within an encryption zone
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.PerFileEncryptionInfoProto}
   */
  public static final class PerFileEncryptionInfoProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.PerFileEncryptionInfoProto)
      PerFileEncryptionInfoProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use PerFileEncryptionInfoProto.newBuilder() to construct.
    private PerFileEncryptionInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private PerFileEncryptionInfoProto() {
      key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      ezKeyVersionName_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new PerFileEncryptionInfoProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.Builder.class);
    }

    private int bitField0_;
    public static final int KEY_FIELD_NUMBER = 1;
    private org.apache.hadoop.thirdparty.protobuf.ByteString key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>required bytes key = 1;</code>
     * @return Whether the key field is set.
     */
    @java.lang.Override
    public boolean hasKey() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required bytes key = 1;</code>
     * @return The key.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getKey() {
      return key_;
    }

    public static final int IV_FIELD_NUMBER = 2;
    private org.apache.hadoop.thirdparty.protobuf.ByteString iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>required bytes iv = 2;</code>
     * @return Whether the iv field is set.
     */
    @java.lang.Override
    public boolean hasIv() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required bytes iv = 2;</code>
     * @return The iv.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getIv() {
      return iv_;
    }

    public static final int EZKEYVERSIONNAME_FIELD_NUMBER = 3;
    @SuppressWarnings("serial")
    private volatile java.lang.Object ezKeyVersionName_ = "";
    /**
     * <code>required string ezKeyVersionName = 3;</code>
     * @return Whether the ezKeyVersionName field is set.
     */
    @java.lang.Override
    public boolean hasEzKeyVersionName() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required string ezKeyVersionName = 3;</code>
     * @return The ezKeyVersionName.
     */
    @java.lang.Override
    public java.lang.String getEzKeyVersionName() {
      java.lang.Object ref = ezKeyVersionName_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          ezKeyVersionName_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string ezKeyVersionName = 3;</code>
     * @return The bytes for ezKeyVersionName.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getEzKeyVersionNameBytes() {
      java.lang.Object ref = ezKeyVersionName_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        ezKeyVersionName_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasKey()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasIv()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasEzKeyVersionName()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeBytes(1, key_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeBytes(2, iv_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, ezKeyVersionName_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(1, key_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(2, iv_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, ezKeyVersionName_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto) obj;

      if (hasKey() != other.hasKey()) return false;
      if (hasKey()) {
        if (!getKey()
            .equals(other.getKey())) return false;
      }
      if (hasIv() != other.hasIv()) return false;
      if (hasIv()) {
        if (!getIv()
            .equals(other.getIv())) return false;
      }
      if (hasEzKeyVersionName() != other.hasEzKeyVersionName()) return false;
      if (hasEzKeyVersionName()) {
        if (!getEzKeyVersionName()
            .equals(other.getEzKeyVersionName())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasKey()) {
        hash = (37 * hash) + KEY_FIELD_NUMBER;
        hash = (53 * hash) + getKey().hashCode();
      }
      if (hasIv()) {
        hash = (37 * hash) + IV_FIELD_NUMBER;
        hash = (53 * hash) + getIv().hashCode();
      }
      if (hasEzKeyVersionName()) {
        hash = (37 * hash) + EZKEYVERSIONNAME_FIELD_NUMBER;
        hash = (53 * hash) + getEzKeyVersionName().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
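
    // Illustrative sketch (not part of the generated protobuf API): serializing a
    // PerFileEncryptionInfoProto and reading it back through the static
    // parseFrom(byte[]) overload above. The builder setters follow the same
    // pattern as FileEncryptionInfoProto earlier in this file; values are
    // placeholders. parseFrom throws InvalidProtocolBufferException when the
    // input is malformed or a required field is missing.
    //
    //   byte[] serialized = HdfsProtos.PerFileEncryptionInfoProto.newBuilder()
    //       .setKey(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("key-bytes"))
    //       .setIv(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("iv-bytes"))
    //       .setEzKeyVersionName("myKey@0")
    //       .build()
    //       .toByteArray();
    //   HdfsProtos.PerFileEncryptionInfoProto parsed =
    //       HdfsProtos.PerFileEncryptionInfoProto.parseFrom(serialized);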

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Encryption information for an individual
     * file within an encryption zone
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.PerFileEncryptionInfoProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.PerFileEncryptionInfoProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        ezKeyVersionName_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.key_ = key_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.iv_ = iv_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.ezKeyVersionName_ = ezKeyVersionName_;
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto.getDefaultInstance()) return this;
        if (other.hasKey()) {
          setKey(other.getKey());
        }
        if (other.hasIv()) {
          setIv(other.getIv());
        }
        if (other.hasEzKeyVersionName()) {
          ezKeyVersionName_ = other.ezKeyVersionName_;
          bitField0_ |= 0x00000004;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasKey()) {
          return false;
        }
        if (!hasIv()) {
          return false;
        }
        if (!hasEzKeyVersionName()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                key_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                iv_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 26: {
                ezKeyVersionName_ = input.readBytes();
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.thirdparty.protobuf.ByteString key_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>required bytes key = 1;</code>
       * @return Whether the key field is set.
       */
      @java.lang.Override
      public boolean hasKey() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required bytes key = 1;</code>
       * @return The key.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getKey() {
        return key_;
      }
      /**
       * <code>required bytes key = 1;</code>
       * @param value The key to set.
       * @return This builder for chaining.
       */
      public Builder setKey(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        key_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required bytes key = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearKey() {
        bitField0_ = (bitField0_ & ~0x00000001);
        key_ = getDefaultInstance().getKey();
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString iv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>required bytes iv = 2;</code>
       * @return Whether the iv field is set.
       */
      @java.lang.Override
      public boolean hasIv() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required bytes iv = 2;</code>
       * @return The iv.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getIv() {
        return iv_;
      }
      /**
       * <code>required bytes iv = 2;</code>
       * @param value The iv to set.
       * @return This builder for chaining.
       */
      public Builder setIv(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        iv_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required bytes iv = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearIv() {
        bitField0_ = (bitField0_ & ~0x00000002);
        iv_ = getDefaultInstance().getIv();
        onChanged();
        return this;
      }

      private java.lang.Object ezKeyVersionName_ = "";
      /**
       * <code>required string ezKeyVersionName = 3;</code>
       * @return Whether the ezKeyVersionName field is set.
       */
      public boolean hasEzKeyVersionName() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required string ezKeyVersionName = 3;</code>
       * @return The ezKeyVersionName.
       */
      public java.lang.String getEzKeyVersionName() {
        java.lang.Object ref = ezKeyVersionName_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            ezKeyVersionName_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string ezKeyVersionName = 3;</code>
       * @return The bytes for ezKeyVersionName.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getEzKeyVersionNameBytes() {
        java.lang.Object ref = ezKeyVersionName_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          ezKeyVersionName_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string ezKeyVersionName = 3;</code>
       * @param value The ezKeyVersionName to set.
       * @return This builder for chaining.
       */
      public Builder setEzKeyVersionName(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ezKeyVersionName_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required string ezKeyVersionName = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearEzKeyVersionName() {
        ezKeyVersionName_ = getDefaultInstance().getEzKeyVersionName();
        bitField0_ = (bitField0_ & ~0x00000004);
        onChanged();
        return this;
      }
      /**
       * <code>required string ezKeyVersionName = 3;</code>
       * @param value The bytes for ezKeyVersionName to set.
       * @return This builder for chaining.
       */
      public Builder setEzKeyVersionNameBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        ezKeyVersionName_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PerFileEncryptionInfoProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.PerFileEncryptionInfoProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<PerFileEncryptionInfoProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<PerFileEncryptionInfoProto>() {
      @java.lang.Override
      public PerFileEncryptionInfoProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<PerFileEncryptionInfoProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<PerFileEncryptionInfoProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.PerFileEncryptionInfoProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
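
  // --- Illustrative usage sketch (hand-written, not produced by protoc) ---
  // A minimal, hedged example of round-tripping a PerFileEncryptionInfoProto
  // through the builder and parser generated above. The key, IV and key
  // version name values below are placeholders chosen for illustration only.
  private static PerFileEncryptionInfoProto examplePerFileEncryptionInfoRoundTrip()
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    // All three fields are declared "required", so build() fails with an
    // uninitialized-message error unless key, iv and ezKeyVersionName are set.
    PerFileEncryptionInfoProto info = PerFileEncryptionInfoProto.newBuilder()
        .setKey(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(new byte[] {1, 2, 3}))
        .setIv(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(new byte[] {4, 5, 6}))
        .setEzKeyVersionName("exampleKey@0") // hypothetical key version name
        .build();
    // Serialize to the wire format and parse it back; parseFrom() re-checks
    // that every required field is present.
    byte[] wire = info.toByteArray();
    return PerFileEncryptionInfoProto.parseFrom(wire);
  }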

  public interface ZoneEncryptionInfoProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ZoneEncryptionInfoProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
     * @return Whether the suite field is set.
     */
    boolean hasSuite();
    /**
     * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
     * @return The suite.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite();

    /**
     * <code>required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;</code>
     * @return Whether the cryptoProtocolVersion field is set.
     */
    boolean hasCryptoProtocolVersion();
    /**
     * <code>required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;</code>
     * @return The cryptoProtocolVersion.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion();

    /**
     * <code>required string keyName = 3;</code>
     * @return Whether the keyName field is set.
     */
    boolean hasKeyName();
    /**
     * <code>required string keyName = 3;</code>
     * @return The keyName.
     */
    java.lang.String getKeyName();
    /**
     * <code>required string keyName = 3;</code>
     * @return The bytes for keyName.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getKeyNameBytes();

    /**
     * <code>optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;</code>
     * @return Whether the reencryptionProto field is set.
     */
    boolean hasReencryptionProto();
    /**
     * <code>optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;</code>
     * @return The reencryptionProto.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getReencryptionProto();
    /**
     * <code>optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder getReencryptionProtoOrBuilder();
  }
  /**
   * <pre>
   **
   * Encryption information for an encryption
   * zone
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.ZoneEncryptionInfoProto}
   */
  public static final class ZoneEncryptionInfoProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.ZoneEncryptionInfoProto)
      ZoneEncryptionInfoProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use ZoneEncryptionInfoProto.newBuilder() to construct.
    private ZoneEncryptionInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ZoneEncryptionInfoProto() {
      suite_ = 1;
      cryptoProtocolVersion_ = 1;
      keyName_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ZoneEncryptionInfoProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.Builder.class);
    }

    private int bitField0_;
    public static final int SUITE_FIELD_NUMBER = 1;
    private int suite_ = 1;
    /**
     * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
     * @return Whether the suite field is set.
     */
    @java.lang.Override public boolean hasSuite() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
     * @return The suite.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.forNumber(suite_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN : result;
    }

    public static final int CRYPTOPROTOCOLVERSION_FIELD_NUMBER = 2;
    private int cryptoProtocolVersion_ = 1;
    /**
     * <code>required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;</code>
     * @return Whether the cryptoProtocolVersion field is set.
     */
    @java.lang.Override public boolean hasCryptoProtocolVersion() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;</code>
     * @return The cryptoProtocolVersion.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.forNumber(cryptoProtocolVersion_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION : result;
    }

    public static final int KEYNAME_FIELD_NUMBER = 3;
    @SuppressWarnings("serial")
    private volatile java.lang.Object keyName_ = "";
    /**
     * <code>required string keyName = 3;</code>
     * @return Whether the keyName field is set.
     */
    @java.lang.Override
    public boolean hasKeyName() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required string keyName = 3;</code>
     * @return The keyName.
     */
    @java.lang.Override
    public java.lang.String getKeyName() {
      java.lang.Object ref = keyName_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          keyName_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string keyName = 3;</code>
     * @return The bytes for keyName.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getKeyNameBytes() {
      java.lang.Object ref = keyName_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        keyName_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int REENCRYPTIONPROTO_FIELD_NUMBER = 4;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto reencryptionProto_;
    /**
     * <code>optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;</code>
     * @return Whether the reencryptionProto field is set.
     */
    @java.lang.Override
    public boolean hasReencryptionProto() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;</code>
     * @return The reencryptionProto.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getReencryptionProto() {
      return reencryptionProto_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance() : reencryptionProto_;
    }
    /**
     * <code>optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder getReencryptionProtoOrBuilder() {
      return reencryptionProto_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance() : reencryptionProto_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasSuite()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasCryptoProtocolVersion()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasKeyName()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (hasReencryptionProto()) {
        if (!getReencryptionProto().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeEnum(1, suite_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeEnum(2, cryptoProtocolVersion_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, keyName_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeMessage(4, getReencryptionProto());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(1, suite_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(2, cryptoProtocolVersion_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, keyName_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(4, getReencryptionProto());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto) obj;

      if (hasSuite() != other.hasSuite()) return false;
      if (hasSuite()) {
        if (suite_ != other.suite_) return false;
      }
      if (hasCryptoProtocolVersion() != other.hasCryptoProtocolVersion()) return false;
      if (hasCryptoProtocolVersion()) {
        if (cryptoProtocolVersion_ != other.cryptoProtocolVersion_) return false;
      }
      if (hasKeyName() != other.hasKeyName()) return false;
      if (hasKeyName()) {
        if (!getKeyName()
            .equals(other.getKeyName())) return false;
      }
      if (hasReencryptionProto() != other.hasReencryptionProto()) return false;
      if (hasReencryptionProto()) {
        if (!getReencryptionProto()
            .equals(other.getReencryptionProto())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasSuite()) {
        hash = (37 * hash) + SUITE_FIELD_NUMBER;
        hash = (53 * hash) + suite_;
      }
      if (hasCryptoProtocolVersion()) {
        hash = (37 * hash) + CRYPTOPROTOCOLVERSION_FIELD_NUMBER;
        hash = (53 * hash) + cryptoProtocolVersion_;
      }
      if (hasKeyName()) {
        hash = (37 * hash) + KEYNAME_FIELD_NUMBER;
        hash = (53 * hash) + getKeyName().hashCode();
      }
      if (hasReencryptionProto()) {
        hash = (37 * hash) + REENCRYPTIONPROTO_FIELD_NUMBER;
        hash = (53 * hash) + getReencryptionProto().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Encryption information for an encryption
     * zone
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.ZoneEncryptionInfoProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ZoneEncryptionInfoProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getReencryptionProtoFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        suite_ = 1;
        cryptoProtocolVersion_ = 1;
        keyName_ = "";
        reencryptionProto_ = null;
        if (reencryptionProtoBuilder_ != null) {
          reencryptionProtoBuilder_.dispose();
          reencryptionProtoBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.suite_ = suite_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.cryptoProtocolVersion_ = cryptoProtocolVersion_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.keyName_ = keyName_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.reencryptionProto_ = reencryptionProtoBuilder_ == null
              ? reencryptionProto_
              : reencryptionProtoBuilder_.build();
          to_bitField0_ |= 0x00000008;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto.getDefaultInstance()) return this;
        if (other.hasSuite()) {
          setSuite(other.getSuite());
        }
        if (other.hasCryptoProtocolVersion()) {
          setCryptoProtocolVersion(other.getCryptoProtocolVersion());
        }
        if (other.hasKeyName()) {
          keyName_ = other.keyName_;
          bitField0_ |= 0x00000004;
          onChanged();
        }
        if (other.hasReencryptionProto()) {
          mergeReencryptionProto(other.getReencryptionProto());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasSuite()) {
          return false;
        }
        if (!hasCryptoProtocolVersion()) {
          return false;
        }
        if (!hasKeyName()) {
          return false;
        }
        if (hasReencryptionProto()) {
          if (!getReencryptionProto().isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(1, tmpRaw);
                } else {
                  suite_ = tmpRaw;
                  bitField0_ |= 0x00000001;
                }
                break;
              } // case 8
              case 16: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(2, tmpRaw);
                } else {
                  cryptoProtocolVersion_ = tmpRaw;
                  bitField0_ |= 0x00000002;
                }
                break;
              } // case 16
              case 26: {
                keyName_ = input.readBytes();
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              case 34: {
                input.readMessage(
                    getReencryptionProtoFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000008;
                break;
              } // case 34
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int suite_ = 1;
      /**
       * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
       * @return Whether the suite field is set.
       */
      @java.lang.Override public boolean hasSuite() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
       * @return The suite.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.forNumber(suite_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN : result;
      }
      /**
       * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
       * @param value The suite to set.
       * @return This builder for chaining.
       */
      public Builder setSuite(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        suite_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearSuite() {
        bitField0_ = (bitField0_ & ~0x00000001);
        suite_ = 1;
        onChanged();
        return this;
      }

      private int cryptoProtocolVersion_ = 1;
      /**
       * <code>required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;</code>
       * @return Whether the cryptoProtocolVersion field is set.
       */
      @java.lang.Override public boolean hasCryptoProtocolVersion() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;</code>
       * @return The cryptoProtocolVersion.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto getCryptoProtocolVersion() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.forNumber(cryptoProtocolVersion_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION : result;
      }
      /**
       * <code>required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;</code>
       * @param value The cryptoProtocolVersion to set.
       * @return This builder for chaining.
       */
      public Builder setCryptoProtocolVersion(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000002;
        cryptoProtocolVersion_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.CryptoProtocolVersionProto cryptoProtocolVersion = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearCryptoProtocolVersion() {
        bitField0_ = (bitField0_ & ~0x00000002);
        cryptoProtocolVersion_ = 1;
        onChanged();
        return this;
      }

      private java.lang.Object keyName_ = "";
      /**
       * <code>required string keyName = 3;</code>
       * @return Whether the keyName field is set.
       */
      public boolean hasKeyName() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required string keyName = 3;</code>
       * @return The keyName.
       */
      public java.lang.String getKeyName() {
        java.lang.Object ref = keyName_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            keyName_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string keyName = 3;</code>
       * @return The bytes for keyName.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getKeyNameBytes() {
        java.lang.Object ref = keyName_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          keyName_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string keyName = 3;</code>
       * @param value The keyName to set.
       * @return This builder for chaining.
       */
      public Builder setKeyName(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        keyName_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required string keyName = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearKeyName() {
        keyName_ = getDefaultInstance().getKeyName();
        bitField0_ = (bitField0_ & ~0x00000004);
        onChanged();
        return this;
      }
      /**
       * <code>required string keyName = 3;</code>
       * @param value The bytes for keyName to set.
       * @return This builder for chaining.
       */
      public Builder setKeyNameBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        keyName_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto reencryptionProto_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder> reencryptionProtoBuilder_;
      /**
       * <code>optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;</code>
       * @return Whether the reencryptionProto field is set.
       */
      public boolean hasReencryptionProto() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;</code>
       * @return The reencryptionProto.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getReencryptionProto() {
        if (reencryptionProtoBuilder_ == null) {
          return reencryptionProto_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance() : reencryptionProto_;
        } else {
          return reencryptionProtoBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;</code>
       */
      public Builder setReencryptionProto(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto value) {
        if (reencryptionProtoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          reencryptionProto_ = value;
        } else {
          reencryptionProtoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;</code>
       */
      public Builder setReencryptionProto(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder builderForValue) {
        if (reencryptionProtoBuilder_ == null) {
          reencryptionProto_ = builderForValue.build();
        } else {
          reencryptionProtoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;</code>
       */
      public Builder mergeReencryptionProto(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto value) {
        if (reencryptionProtoBuilder_ == null) {
          if (((bitField0_ & 0x00000008) != 0) &&
            reencryptionProto_ != null &&
            reencryptionProto_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance()) {
            getReencryptionProtoBuilder().mergeFrom(value);
          } else {
            reencryptionProto_ = value;
          }
        } else {
          reencryptionProtoBuilder_.mergeFrom(value);
        }
        if (reencryptionProto_ != null) {
          bitField0_ |= 0x00000008;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;</code>
       */
      public Builder clearReencryptionProto() {
        bitField0_ = (bitField0_ & ~0x00000008);
        reencryptionProto_ = null;
        if (reencryptionProtoBuilder_ != null) {
          reencryptionProtoBuilder_.dispose();
          reencryptionProtoBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder getReencryptionProtoBuilder() {
        bitField0_ |= 0x00000008;
        onChanged();
        return getReencryptionProtoFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder getReencryptionProtoOrBuilder() {
        if (reencryptionProtoBuilder_ != null) {
          return reencryptionProtoBuilder_.getMessageOrBuilder();
        } else {
          return reencryptionProto_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance() : reencryptionProto_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.ReencryptionInfoProto reencryptionProto = 4;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder> 
          getReencryptionProtoFieldBuilder() {
        if (reencryptionProtoBuilder_ == null) {
          reencryptionProtoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder>(
                  getReencryptionProto(),
                  getParentForChildren(),
                  isClean());
          reencryptionProto_ = null;
        }
        return reencryptionProtoBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ZoneEncryptionInfoProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ZoneEncryptionInfoProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ZoneEncryptionInfoProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ZoneEncryptionInfoProto>() {
      @java.lang.Override
      public ZoneEncryptionInfoProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ZoneEncryptionInfoProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ZoneEncryptionInfoProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
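
  /*
   * Sketch of a typical read path for ZoneEncryptionInfoProto, assuming the
   * serialized bytes come from the caller (for example an encryption-zone
   * xattr value); loadZoneXAttr() is a hypothetical source of those bytes, and
   * the accessors used are the standard generated ones for the optional
   * reencryptionProto field:
   *
   *   byte[] zoneBytes = loadZoneXAttr();
   *   ZoneEncryptionInfoProto zone = ZoneEncryptionInfoProto.parseFrom(zoneBytes);
   *   if (zone.hasReencryptionProto()) {
   *     ReencryptionInfoProto reencryption = zone.getReencryptionProto();
   *     // re-encryption status for this zone: submission time, counters, etc.
   *   }
   */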

  public interface ReencryptionInfoProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ReencryptionInfoProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required string ezKeyVersionName = 1;</code>
     * @return Whether the ezKeyVersionName field is set.
     */
    boolean hasEzKeyVersionName();
    /**
     * <code>required string ezKeyVersionName = 1;</code>
     * @return The ezKeyVersionName.
     */
    java.lang.String getEzKeyVersionName();
    /**
     * <code>required string ezKeyVersionName = 1;</code>
     * @return The bytes for ezKeyVersionName.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getEzKeyVersionNameBytes();

    /**
     * <code>required uint64 submissionTime = 2;</code>
     * @return Whether the submissionTime field is set.
     */
    boolean hasSubmissionTime();
    /**
     * <code>required uint64 submissionTime = 2;</code>
     * @return The submissionTime.
     */
    long getSubmissionTime();

    /**
     * <code>required bool canceled = 3;</code>
     * @return Whether the canceled field is set.
     */
    boolean hasCanceled();
    /**
     * <code>required bool canceled = 3;</code>
     * @return The canceled.
     */
    boolean getCanceled();

    /**
     * <code>required int64 numReencrypted = 4;</code>
     * @return Whether the numReencrypted field is set.
     */
    boolean hasNumReencrypted();
    /**
     * <code>required int64 numReencrypted = 4;</code>
     * @return The numReencrypted.
     */
    long getNumReencrypted();

    /**
     * <code>required int64 numFailures = 5;</code>
     * @return Whether the numFailures field is set.
     */
    boolean hasNumFailures();
    /**
     * <code>required int64 numFailures = 5;</code>
     * @return The numFailures.
     */
    long getNumFailures();

    /**
     * <code>optional uint64 completionTime = 6;</code>
     * @return Whether the completionTime field is set.
     */
    boolean hasCompletionTime();
    /**
     * <code>optional uint64 completionTime = 6;</code>
     * @return The completionTime.
     */
    long getCompletionTime();

    /**
     * <code>optional string lastFile = 7;</code>
     * @return Whether the lastFile field is set.
     */
    boolean hasLastFile();
    /**
     * <code>optional string lastFile = 7;</code>
     * @return The lastFile.
     */
    java.lang.String getLastFile();
    /**
     * <code>optional string lastFile = 7;</code>
     * @return The bytes for lastFile.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getLastFileBytes();
  }
  /**
   * <pre>
   **
   * Re-encryption information for an encryption zone
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.ReencryptionInfoProto}
   */
  public static final class ReencryptionInfoProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.ReencryptionInfoProto)
      ReencryptionInfoProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use ReencryptionInfoProto.newBuilder() to construct.
    private ReencryptionInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ReencryptionInfoProto() {
      ezKeyVersionName_ = "";
      lastFile_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ReencryptionInfoProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ReencryptionInfoProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ReencryptionInfoProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder.class);
    }

    private int bitField0_;
    public static final int EZKEYVERSIONNAME_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object ezKeyVersionName_ = "";
    /**
     * <code>required string ezKeyVersionName = 1;</code>
     * @return Whether the ezKeyVersionName field is set.
     */
    @java.lang.Override
    public boolean hasEzKeyVersionName() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required string ezKeyVersionName = 1;</code>
     * @return The ezKeyVersionName.
     */
    @java.lang.Override
    public java.lang.String getEzKeyVersionName() {
      java.lang.Object ref = ezKeyVersionName_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          ezKeyVersionName_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string ezKeyVersionName = 1;</code>
     * @return The bytes for ezKeyVersionName.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getEzKeyVersionNameBytes() {
      java.lang.Object ref = ezKeyVersionName_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        ezKeyVersionName_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int SUBMISSIONTIME_FIELD_NUMBER = 2;
    private long submissionTime_ = 0L;
    /**
     * <code>required uint64 submissionTime = 2;</code>
     * @return Whether the submissionTime field is set.
     */
    @java.lang.Override
    public boolean hasSubmissionTime() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required uint64 submissionTime = 2;</code>
     * @return The submissionTime.
     */
    @java.lang.Override
    public long getSubmissionTime() {
      return submissionTime_;
    }

    public static final int CANCELED_FIELD_NUMBER = 3;
    private boolean canceled_ = false;
    /**
     * <code>required bool canceled = 3;</code>
     * @return Whether the canceled field is set.
     */
    @java.lang.Override
    public boolean hasCanceled() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required bool canceled = 3;</code>
     * @return The canceled.
     */
    @java.lang.Override
    public boolean getCanceled() {
      return canceled_;
    }

    public static final int NUMREENCRYPTED_FIELD_NUMBER = 4;
    private long numReencrypted_ = 0L;
    /**
     * <code>required int64 numReencrypted = 4;</code>
     * @return Whether the numReencrypted field is set.
     */
    @java.lang.Override
    public boolean hasNumReencrypted() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>required int64 numReencrypted = 4;</code>
     * @return The numReencrypted.
     */
    @java.lang.Override
    public long getNumReencrypted() {
      return numReencrypted_;
    }

    public static final int NUMFAILURES_FIELD_NUMBER = 5;
    private long numFailures_ = 0L;
    /**
     * <code>required int64 numFailures = 5;</code>
     * @return Whether the numFailures field is set.
     */
    @java.lang.Override
    public boolean hasNumFailures() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>required int64 numFailures = 5;</code>
     * @return The numFailures.
     */
    @java.lang.Override
    public long getNumFailures() {
      return numFailures_;
    }

    public static final int COMPLETIONTIME_FIELD_NUMBER = 6;
    private long completionTime_ = 0L;
    /**
     * <code>optional uint64 completionTime = 6;</code>
     * @return Whether the completionTime field is set.
     */
    @java.lang.Override
    public boolean hasCompletionTime() {
      return ((bitField0_ & 0x00000020) != 0);
    }
    /**
     * <code>optional uint64 completionTime = 6;</code>
     * @return The completionTime.
     */
    @java.lang.Override
    public long getCompletionTime() {
      return completionTime_;
    }

    public static final int LASTFILE_FIELD_NUMBER = 7;
    @SuppressWarnings("serial")
    private volatile java.lang.Object lastFile_ = "";
    /**
     * <code>optional string lastFile = 7;</code>
     * @return Whether the lastFile field is set.
     */
    @java.lang.Override
    public boolean hasLastFile() {
      return ((bitField0_ & 0x00000040) != 0);
    }
    /**
     * <code>optional string lastFile = 7;</code>
     * @return The lastFile.
     */
    @java.lang.Override
    public java.lang.String getLastFile() {
      java.lang.Object ref = lastFile_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          lastFile_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string lastFile = 7;</code>
     * @return The bytes for lastFile.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getLastFileBytes() {
      java.lang.Object ref = lastFile_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        lastFile_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasEzKeyVersionName()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSubmissionTime()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasCanceled()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasNumReencrypted()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasNumFailures()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, ezKeyVersionName_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(2, submissionTime_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeBool(3, canceled_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeInt64(4, numReencrypted_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeInt64(5, numFailures_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        output.writeUInt64(6, completionTime_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 7, lastFile_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, ezKeyVersionName_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(2, submissionTime_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(3, canceled_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt64Size(4, numReencrypted_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt64Size(5, numFailures_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(6, completionTime_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(7, lastFile_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto) obj;

      if (hasEzKeyVersionName() != other.hasEzKeyVersionName()) return false;
      if (hasEzKeyVersionName()) {
        if (!getEzKeyVersionName()
            .equals(other.getEzKeyVersionName())) return false;
      }
      if (hasSubmissionTime() != other.hasSubmissionTime()) return false;
      if (hasSubmissionTime()) {
        if (getSubmissionTime()
            != other.getSubmissionTime()) return false;
      }
      if (hasCanceled() != other.hasCanceled()) return false;
      if (hasCanceled()) {
        if (getCanceled()
            != other.getCanceled()) return false;
      }
      if (hasNumReencrypted() != other.hasNumReencrypted()) return false;
      if (hasNumReencrypted()) {
        if (getNumReencrypted()
            != other.getNumReencrypted()) return false;
      }
      if (hasNumFailures() != other.hasNumFailures()) return false;
      if (hasNumFailures()) {
        if (getNumFailures()
            != other.getNumFailures()) return false;
      }
      if (hasCompletionTime() != other.hasCompletionTime()) return false;
      if (hasCompletionTime()) {
        if (getCompletionTime()
            != other.getCompletionTime()) return false;
      }
      if (hasLastFile() != other.hasLastFile()) return false;
      if (hasLastFile()) {
        if (!getLastFile()
            .equals(other.getLastFile())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasEzKeyVersionName()) {
        hash = (37 * hash) + EZKEYVERSIONNAME_FIELD_NUMBER;
        hash = (53 * hash) + getEzKeyVersionName().hashCode();
      }
      if (hasSubmissionTime()) {
        hash = (37 * hash) + SUBMISSIONTIME_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getSubmissionTime());
      }
      if (hasCanceled()) {
        hash = (37 * hash) + CANCELED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getCanceled());
      }
      if (hasNumReencrypted()) {
        hash = (37 * hash) + NUMREENCRYPTED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getNumReencrypted());
      }
      if (hasNumFailures()) {
        hash = (37 * hash) + NUMFAILURES_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getNumFailures());
      }
      if (hasCompletionTime()) {
        hash = (37 * hash) + COMPLETIONTIME_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getCompletionTime());
      }
      if (hasLastFile()) {
        hash = (37 * hash) + LASTFILE_FIELD_NUMBER;
        hash = (53 * hash) + getLastFile().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Re-encryption information for an encryption zone
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.ReencryptionInfoProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ReencryptionInfoProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ReencryptionInfoProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ReencryptionInfoProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        ezKeyVersionName_ = "";
        submissionTime_ = 0L;
        canceled_ = false;
        numReencrypted_ = 0L;
        numFailures_ = 0L;
        completionTime_ = 0L;
        lastFile_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ReencryptionInfoProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.ezKeyVersionName_ = ezKeyVersionName_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.submissionTime_ = submissionTime_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.canceled_ = canceled_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.numReencrypted_ = numReencrypted_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.numFailures_ = numFailures_;
          to_bitField0_ |= 0x00000010;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.completionTime_ = completionTime_;
          to_bitField0_ |= 0x00000020;
        }
        if (((from_bitField0_ & 0x00000040) != 0)) {
          result.lastFile_ = lastFile_;
          to_bitField0_ |= 0x00000040;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto.getDefaultInstance()) return this;
        if (other.hasEzKeyVersionName()) {
          ezKeyVersionName_ = other.ezKeyVersionName_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasSubmissionTime()) {
          setSubmissionTime(other.getSubmissionTime());
        }
        if (other.hasCanceled()) {
          setCanceled(other.getCanceled());
        }
        if (other.hasNumReencrypted()) {
          setNumReencrypted(other.getNumReencrypted());
        }
        if (other.hasNumFailures()) {
          setNumFailures(other.getNumFailures());
        }
        if (other.hasCompletionTime()) {
          setCompletionTime(other.getCompletionTime());
        }
        if (other.hasLastFile()) {
          lastFile_ = other.lastFile_;
          bitField0_ |= 0x00000040;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasEzKeyVersionName()) {
          return false;
        }
        if (!hasSubmissionTime()) {
          return false;
        }
        if (!hasCanceled()) {
          return false;
        }
        if (!hasNumReencrypted()) {
          return false;
        }
        if (!hasNumFailures()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                ezKeyVersionName_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                submissionTime_ = input.readUInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                canceled_ = input.readBool();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 32: {
                numReencrypted_ = input.readInt64();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
              case 40: {
                numFailures_ = input.readInt64();
                bitField0_ |= 0x00000010;
                break;
              } // case 40
              case 48: {
                completionTime_ = input.readUInt64();
                bitField0_ |= 0x00000020;
                break;
              } // case 48
              case 58: {
                lastFile_ = input.readBytes();
                bitField0_ |= 0x00000040;
                break;
              } // case 58
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object ezKeyVersionName_ = "";
      /**
       * <code>required string ezKeyVersionName = 1;</code>
       * @return Whether the ezKeyVersionName field is set.
       */
      public boolean hasEzKeyVersionName() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required string ezKeyVersionName = 1;</code>
       * @return The ezKeyVersionName.
       */
      public java.lang.String getEzKeyVersionName() {
        java.lang.Object ref = ezKeyVersionName_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            ezKeyVersionName_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string ezKeyVersionName = 1;</code>
       * @return The bytes for ezKeyVersionName.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getEzKeyVersionNameBytes() {
        java.lang.Object ref = ezKeyVersionName_;
        if (ref instanceof java.lang.String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          ezKeyVersionName_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string ezKeyVersionName = 1;</code>
       * @param value The ezKeyVersionName to set.
       * @return This builder for chaining.
       */
      public Builder setEzKeyVersionName(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ezKeyVersionName_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required string ezKeyVersionName = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearEzKeyVersionName() {
        ezKeyVersionName_ = getDefaultInstance().getEzKeyVersionName();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>required string ezKeyVersionName = 1;</code>
       * @param value The bytes for ezKeyVersionName to set.
       * @return This builder for chaining.
       */
      public Builder setEzKeyVersionNameBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        ezKeyVersionName_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private long submissionTime_ ;
      /**
       * <code>required uint64 submissionTime = 2;</code>
       * @return Whether the submissionTime field is set.
       */
      @java.lang.Override
      public boolean hasSubmissionTime() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required uint64 submissionTime = 2;</code>
       * @return The submissionTime.
       */
      @java.lang.Override
      public long getSubmissionTime() {
        return submissionTime_;
      }
      /**
       * <code>required uint64 submissionTime = 2;</code>
       * @param value The submissionTime to set.
       * @return This builder for chaining.
       */
      public Builder setSubmissionTime(long value) {

        submissionTime_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 submissionTime = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearSubmissionTime() {
        bitField0_ = (bitField0_ & ~0x00000002);
        submissionTime_ = 0L;
        onChanged();
        return this;
      }

      private boolean canceled_ ;
      /**
       * <code>required bool canceled = 3;</code>
       * @return Whether the canceled field is set.
       */
      @java.lang.Override
      public boolean hasCanceled() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required bool canceled = 3;</code>
       * @return The canceled.
       */
      @java.lang.Override
      public boolean getCanceled() {
        return canceled_;
      }
      /**
       * <code>required bool canceled = 3;</code>
       * @param value The canceled to set.
       * @return This builder for chaining.
       */
      public Builder setCanceled(boolean value) {

        canceled_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required bool canceled = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearCanceled() {
        bitField0_ = (bitField0_ & ~0x00000004);
        canceled_ = false;
        onChanged();
        return this;
      }

      private long numReencrypted_ ;
      /**
       * <code>required int64 numReencrypted = 4;</code>
       * @return Whether the numReencrypted field is set.
       */
      @java.lang.Override
      public boolean hasNumReencrypted() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>required int64 numReencrypted = 4;</code>
       * @return The numReencrypted.
       */
      @java.lang.Override
      public long getNumReencrypted() {
        return numReencrypted_;
      }
      /**
       * <code>required int64 numReencrypted = 4;</code>
       * @param value The numReencrypted to set.
       * @return This builder for chaining.
       */
      public Builder setNumReencrypted(long value) {

        numReencrypted_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>required int64 numReencrypted = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearNumReencrypted() {
        bitField0_ = (bitField0_ & ~0x00000008);
        numReencrypted_ = 0L;
        onChanged();
        return this;
      }

      private long numFailures_ ;
      /**
       * <code>required int64 numFailures = 5;</code>
       * @return Whether the numFailures field is set.
       */
      @java.lang.Override
      public boolean hasNumFailures() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>required int64 numFailures = 5;</code>
       * @return The numFailures.
       */
      @java.lang.Override
      public long getNumFailures() {
        return numFailures_;
      }
      /**
       * <code>required int64 numFailures = 5;</code>
       * @param value The numFailures to set.
       * @return This builder for chaining.
       */
      public Builder setNumFailures(long value) {

        numFailures_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>required int64 numFailures = 5;</code>
       * @return This builder for chaining.
       */
      public Builder clearNumFailures() {
        bitField0_ = (bitField0_ & ~0x00000010);
        numFailures_ = 0L;
        onChanged();
        return this;
      }

      private long completionTime_ ;
      /**
       * <code>optional uint64 completionTime = 6;</code>
       * @return Whether the completionTime field is set.
       */
      @java.lang.Override
      public boolean hasCompletionTime() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>optional uint64 completionTime = 6;</code>
       * @return The completionTime.
       */
      @java.lang.Override
      public long getCompletionTime() {
        return completionTime_;
      }
      /**
       * <code>optional uint64 completionTime = 6;</code>
       * @param value The completionTime to set.
       * @return This builder for chaining.
       */
      public Builder setCompletionTime(long value) {

        completionTime_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 completionTime = 6;</code>
       * @return This builder for chaining.
       */
      public Builder clearCompletionTime() {
        bitField0_ = (bitField0_ & ~0x00000020);
        completionTime_ = 0L;
        onChanged();
        return this;
      }

      private java.lang.Object lastFile_ = "";
      /**
       * <code>optional string lastFile = 7;</code>
       * @return Whether the lastFile field is set.
       */
      public boolean hasLastFile() {
        return ((bitField0_ & 0x00000040) != 0);
      }
      /**
       * <code>optional string lastFile = 7;</code>
       * @return The lastFile.
       */
      public java.lang.String getLastFile() {
        java.lang.Object ref = lastFile_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            lastFile_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string lastFile = 7;</code>
       * @return The bytes for lastFile.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getLastFileBytes() {
        java.lang.Object ref = lastFile_;
        if (ref instanceof java.lang.String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          lastFile_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string lastFile = 7;</code>
       * @param value The lastFile to set.
       * @return This builder for chaining.
       */
      public Builder setLastFile(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        lastFile_ = value;
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <code>optional string lastFile = 7;</code>
       * @return This builder for chaining.
       */
      public Builder clearLastFile() {
        lastFile_ = getDefaultInstance().getLastFile();
        bitField0_ = (bitField0_ & ~0x00000040);
        onChanged();
        return this;
      }
      /**
       * <code>optional string lastFile = 7;</code>
       * @param value The bytes for lastFile to set.
       * @return This builder for chaining.
       */
      public Builder setLastFileBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        lastFile_ = value;
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ReencryptionInfoProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ReencryptionInfoProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ReencryptionInfoProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ReencryptionInfoProto>() {
      @java.lang.Override
      public ReencryptionInfoProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ReencryptionInfoProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ReencryptionInfoProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
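
  /*
   * Sketch of a minimal builder round trip for ReencryptionInfoProto. The five
   * required fields (ezKeyVersionName, submissionTime, canceled, numReencrypted,
   * numFailures) must all be set or build() throws; the literal values below
   * are placeholders, not values mandated by the protocol:
   *
   *   ReencryptionInfoProto info = ReencryptionInfoProto.newBuilder()
   *       .setEzKeyVersionName("ezKeyVersion-0")
   *       .setSubmissionTime(System.currentTimeMillis())
   *       .setCanceled(false)
   *       .setNumReencrypted(0L)
   *       .setNumFailures(0L)
   *       .build();
   *   byte[] bytes = info.toByteArray();
   *   ReencryptionInfoProto parsed = ReencryptionInfoProto.parseFrom(bytes);
   */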

  public interface CipherOptionProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.CipherOptionProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
     * @return Whether the suite field is set.
     */
    boolean hasSuite();
    /**
     * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
     * @return The suite.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite();

    /**
     * <code>optional bytes inKey = 2;</code>
     * @return Whether the inKey field is set.
     */
    boolean hasInKey();
    /**
     * <code>optional bytes inKey = 2;</code>
     * @return The inKey.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getInKey();

    /**
     * <code>optional bytes inIv = 3;</code>
     * @return Whether the inIv field is set.
     */
    boolean hasInIv();
    /**
     * <code>optional bytes inIv = 3;</code>
     * @return The inIv.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getInIv();

    /**
     * <code>optional bytes outKey = 4;</code>
     * @return Whether the outKey field is set.
     */
    boolean hasOutKey();
    /**
     * <code>optional bytes outKey = 4;</code>
     * @return The outKey.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getOutKey();

    /**
     * <code>optional bytes outIv = 5;</code>
     * @return Whether the outIv field is set.
     */
    boolean hasOutIv();
    /**
     * <code>optional bytes outIv = 5;</code>
     * @return The outIv.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getOutIv();
  }
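
  /*
   * Sketch of constructing a CipherOptionProto: suite is required, while the
   * per-direction key/IV fields are optional bytes. The AES_CTR_NOPADDING
   * constant and the inKeyBytes/inIvBytes arrays below are illustrative
   * assumptions, not values defined in this file:
   *
   *   CipherOptionProto option = CipherOptionProto.newBuilder()
   *       .setSuite(CipherSuiteProto.AES_CTR_NOPADDING)
   *       .setInKey(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(inKeyBytes))
   *       .setInIv(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(inIvBytes))
   *       .build();
   */
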
  /**
   * <pre>
   **
   * Cipher option
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.CipherOptionProto}
   */
  public static final class CipherOptionProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.CipherOptionProto)
      CipherOptionProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use CipherOptionProto.newBuilder() to construct.
    private CipherOptionProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private CipherOptionProto() {
      suite_ = 1;
      inKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      inIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      outKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      outIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new CipherOptionProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder.class);
    }

    private int bitField0_;
    public static final int SUITE_FIELD_NUMBER = 1;
    private int suite_ = 1;
    /**
     * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
     * @return Whether the suite field is set.
     */
    @java.lang.Override public boolean hasSuite() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
     * @return The suite.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.forNumber(suite_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN : result;
    }

    public static final int INKEY_FIELD_NUMBER = 2;
    private org.apache.hadoop.thirdparty.protobuf.ByteString inKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>optional bytes inKey = 2;</code>
     * @return Whether the inKey field is set.
     */
    @java.lang.Override
    public boolean hasInKey() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional bytes inKey = 2;</code>
     * @return The inKey.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getInKey() {
      return inKey_;
    }

    public static final int INIV_FIELD_NUMBER = 3;
    private org.apache.hadoop.thirdparty.protobuf.ByteString inIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>optional bytes inIv = 3;</code>
     * @return Whether the inIv field is set.
     */
    @java.lang.Override
    public boolean hasInIv() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional bytes inIv = 3;</code>
     * @return The inIv.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getInIv() {
      return inIv_;
    }

    public static final int OUTKEY_FIELD_NUMBER = 4;
    private org.apache.hadoop.thirdparty.protobuf.ByteString outKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>optional bytes outKey = 4;</code>
     * @return Whether the outKey field is set.
     */
    @java.lang.Override
    public boolean hasOutKey() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>optional bytes outKey = 4;</code>
     * @return The outKey.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getOutKey() {
      return outKey_;
    }

    public static final int OUTIV_FIELD_NUMBER = 5;
    private org.apache.hadoop.thirdparty.protobuf.ByteString outIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>optional bytes outIv = 5;</code>
     * @return Whether the outIv field is set.
     */
    @java.lang.Override
    public boolean hasOutIv() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>optional bytes outIv = 5;</code>
     * @return The outIv.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getOutIv() {
      return outIv_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasSuite()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeEnum(1, suite_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeBytes(2, inKey_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeBytes(3, inIv_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeBytes(4, outKey_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeBytes(5, outIv_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(1, suite_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(2, inKey_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(3, inIv_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(4, outKey_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(5, outIv_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto) obj;

      if (hasSuite() != other.hasSuite()) return false;
      if (hasSuite()) {
        if (suite_ != other.suite_) return false;
      }
      if (hasInKey() != other.hasInKey()) return false;
      if (hasInKey()) {
        if (!getInKey()
            .equals(other.getInKey())) return false;
      }
      if (hasInIv() != other.hasInIv()) return false;
      if (hasInIv()) {
        if (!getInIv()
            .equals(other.getInIv())) return false;
      }
      if (hasOutKey() != other.hasOutKey()) return false;
      if (hasOutKey()) {
        if (!getOutKey()
            .equals(other.getOutKey())) return false;
      }
      if (hasOutIv() != other.hasOutIv()) return false;
      if (hasOutIv()) {
        if (!getOutIv()
            .equals(other.getOutIv())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasSuite()) {
        hash = (37 * hash) + SUITE_FIELD_NUMBER;
        hash = (53 * hash) + suite_;
      }
      if (hasInKey()) {
        hash = (37 * hash) + INKEY_FIELD_NUMBER;
        hash = (53 * hash) + getInKey().hashCode();
      }
      if (hasInIv()) {
        hash = (37 * hash) + INIV_FIELD_NUMBER;
        hash = (53 * hash) + getInIv().hashCode();
      }
      if (hasOutKey()) {
        hash = (37 * hash) + OUTKEY_FIELD_NUMBER;
        hash = (53 * hash) + getOutKey().hashCode();
      }
      if (hasOutIv()) {
        hash = (37 * hash) + OUTIV_FIELD_NUMBER;
        hash = (53 * hash) + getOutIv().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Cipher option
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.CipherOptionProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.CipherOptionProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        suite_ = 1;
        inKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        inIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        outKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        outIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_CipherOptionProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.suite_ = suite_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.inKey_ = inKey_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.inIv_ = inIv_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.outKey_ = outKey_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.outIv_ = outIv_;
          to_bitField0_ |= 0x00000010;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.getDefaultInstance()) return this;
        if (other.hasSuite()) {
          setSuite(other.getSuite());
        }
        if (other.hasInKey()) {
          setInKey(other.getInKey());
        }
        if (other.hasInIv()) {
          setInIv(other.getInIv());
        }
        if (other.hasOutKey()) {
          setOutKey(other.getOutKey());
        }
        if (other.hasOutIv()) {
          setOutIv(other.getOutIv());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasSuite()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
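              // Each protobuf tag is (field_number << 3) | wire_type: 8 is field 1
              // as a varint, 18/26/34/42 are fields 2-5 as length-delimited bytes,
              // and 0 signals end of input.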
              case 0:
                done = true;
                break;
              case 8: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(1, tmpRaw);
                } else {
                  suite_ = tmpRaw;
                  bitField0_ |= 0x00000001;
                }
                break;
              } // case 8
              case 18: {
                inKey_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 26: {
                inIv_ = input.readBytes();
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              case 34: {
                outKey_ = input.readBytes();
                bitField0_ |= 0x00000008;
                break;
              } // case 34
              case 42: {
                outIv_ = input.readBytes();
                bitField0_ |= 0x00000010;
                break;
              } // case 42
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int suite_ = 1;
      /**
       * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
       * @return Whether the suite field is set.
       */
      @java.lang.Override public boolean hasSuite() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
       * @return The suite.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto getSuite() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.forNumber(suite_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto.UNKNOWN : result;
      }
      /**
       * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
       * @param value The suite to set.
       * @return This builder for chaining.
       */
      public Builder setSuite(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        suite_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.CipherSuiteProto suite = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearSuite() {
        bitField0_ = (bitField0_ & ~0x00000001);
        suite_ = 1;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString inKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>optional bytes inKey = 2;</code>
       * @return Whether the inKey field is set.
       */
      @java.lang.Override
      public boolean hasInKey() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional bytes inKey = 2;</code>
       * @return The inKey.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getInKey() {
        return inKey_;
      }
      /**
       * <code>optional bytes inKey = 2;</code>
       * @param value The inKey to set.
       * @return This builder for chaining.
       */
      public Builder setInKey(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        inKey_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional bytes inKey = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearInKey() {
        bitField0_ = (bitField0_ & ~0x00000002);
        inKey_ = getDefaultInstance().getInKey();
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString inIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>optional bytes inIv = 3;</code>
       * @return Whether the inIv field is set.
       */
      @java.lang.Override
      public boolean hasInIv() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional bytes inIv = 3;</code>
       * @return The inIv.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getInIv() {
        return inIv_;
      }
      /**
       * <code>optional bytes inIv = 3;</code>
       * @param value The inIv to set.
       * @return This builder for chaining.
       */
      public Builder setInIv(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        inIv_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional bytes inIv = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearInIv() {
        bitField0_ = (bitField0_ & ~0x00000004);
        inIv_ = getDefaultInstance().getInIv();
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString outKey_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>optional bytes outKey = 4;</code>
       * @return Whether the outKey field is set.
       */
      @java.lang.Override
      public boolean hasOutKey() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional bytes outKey = 4;</code>
       * @return The outKey.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getOutKey() {
        return outKey_;
      }
      /**
       * <code>optional bytes outKey = 4;</code>
       * @param value The outKey to set.
       * @return This builder for chaining.
       */
      public Builder setOutKey(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        outKey_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional bytes outKey = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearOutKey() {
        bitField0_ = (bitField0_ & ~0x00000008);
        outKey_ = getDefaultInstance().getOutKey();
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString outIv_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>optional bytes outIv = 5;</code>
       * @return Whether the outIv field is set.
       */
      @java.lang.Override
      public boolean hasOutIv() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional bytes outIv = 5;</code>
       * @return The outIv.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getOutIv() {
        return outIv_;
      }
      /**
       * <code>optional bytes outIv = 5;</code>
       * @param value The outIv to set.
       * @return This builder for chaining.
       */
      public Builder setOutIv(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        outIv_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional bytes outIv = 5;</code>
       * @return This builder for chaining.
       */
      public Builder clearOutIv() {
        bitField0_ = (bitField0_ & ~0x00000010);
        outIv_ = getDefaultInstance().getOutIv();
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CipherOptionProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.CipherOptionProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<CipherOptionProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<CipherOptionProto>() {
      @java.lang.Override
      public CipherOptionProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<CipherOptionProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<CipherOptionProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
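
  // Illustrative usage sketch (hand-written, not protoc output): build a
  // CipherOptionProto and round-trip it through its wire encoding.
  // CipherSuiteProto.UNKNOWN is used only because it is guaranteed to exist
  // here; a real caller would pick a concrete suite. build() throws if the
  // required 'suite' field is left unset.
  private static CipherOptionProto cipherOptionRoundTripExample(byte[] inKey, byte[] inIv)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    CipherOptionProto option = CipherOptionProto.newBuilder()
        .setSuite(CipherSuiteProto.UNKNOWN)  // required field 1
        .setInKey(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(inKey))
        .setInIv(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(inIv))
        .build();
    byte[] wire = option.toByteArray();      // writeTo() + getSerializedSize()
    return CipherOptionProto.parseFrom(wire);
  }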

  public interface LocatedBlocksProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.LocatedBlocksProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required uint64 fileLength = 1;</code>
     * @return Whether the fileLength field is set.
     */
    boolean hasFileLength();
    /**
     * <code>required uint64 fileLength = 1;</code>
     * @return The fileLength.
     */
    long getFileLength();

    /**
     * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> 
        getBlocksList();
    /**
     * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index);
    /**
     * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
     */
    int getBlocksCount();
    /**
     * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> 
        getBlocksOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
        int index);

    /**
     * <code>required bool underConstruction = 3;</code>
     * @return Whether the underConstruction field is set.
     */
    boolean hasUnderConstruction();
    /**
     * <code>required bool underConstruction = 3;</code>
     * @return The underConstruction.
     */
    boolean getUnderConstruction();

    /**
     * <code>optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;</code>
     * @return Whether the lastBlock field is set.
     */
    boolean hasLastBlock();
    /**
     * <code>optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;</code>
     * @return The lastBlock.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock();
    /**
     * <code>optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder();

    /**
     * <code>required bool isLastBlockComplete = 5;</code>
     * @return Whether the isLastBlockComplete field is set.
     */
    boolean hasIsLastBlockComplete();
    /**
     * <code>required bool isLastBlockComplete = 5;</code>
     * @return The isLastBlockComplete.
     */
    boolean getIsLastBlockComplete();

    /**
     * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;</code>
     * @return Whether the fileEncryptionInfo field is set.
     */
    boolean hasFileEncryptionInfo();
    /**
     * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;</code>
     * @return The fileEncryptionInfo.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo();
    /**
     * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder();

    /**
     * <pre>
     * Optional field for erasure coding
     * </pre>
     *
     * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;</code>
     * @return Whether the ecPolicy field is set.
     */
    boolean hasEcPolicy();
    /**
     * <pre>
     * Optional field for erasure coding
     * </pre>
     *
     * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;</code>
     * @return The ecPolicy.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy();
    /**
     * <pre>
     * Optional field for erasure coding
     * </pre>
     *
     * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder();
  }
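
  // Illustrative usage sketch (hand-written, not protoc output): summarize a
  // located-blocks result through the read-only LocatedBlocksProtoOrBuilder
  // view, which both LocatedBlocksProto and its Builder implement. Optional
  // sub-messages are checked with their has*() accessors before being read.
  private static java.lang.String describeLocatedBlocks(LocatedBlocksProtoOrBuilder blocks) {
    java.lang.StringBuilder sb = new java.lang.StringBuilder();
    sb.append("fileLength=").append(blocks.getFileLength());
    sb.append(" blockCount=").append(blocks.getBlocksCount());
    sb.append(" underConstruction=").append(blocks.getUnderConstruction());
    sb.append(" isLastBlockComplete=").append(blocks.getIsLastBlockComplete());
    if (blocks.hasLastBlock()) {
      sb.append(" hasLastBlock=true");
    }
    if (blocks.hasEcPolicy()) {
      sb.append(" erasureCoded=true");
    }
    return sb.toString();
  }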
  /**
   * <pre>
   **
   * A set of file blocks and their locations.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.LocatedBlocksProto}
   */
  public static final class LocatedBlocksProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.LocatedBlocksProto)
      LocatedBlocksProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use LocatedBlocksProto.newBuilder() to construct.
    private LocatedBlocksProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private LocatedBlocksProto() {
      blocks_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new LocatedBlocksProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder.class);
    }

    private int bitField0_;
    public static final int FILELENGTH_FIELD_NUMBER = 1;
    private long fileLength_ = 0L;
    /**
     * <code>required uint64 fileLength = 1;</code>
     * @return Whether the fileLength field is set.
     */
    @java.lang.Override
    public boolean hasFileLength() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required uint64 fileLength = 1;</code>
     * @return The fileLength.
     */
    @java.lang.Override
    public long getFileLength() {
      return fileLength_;
    }

    public static final int BLOCKS_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> blocks_;
    /**
     * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> getBlocksList() {
      return blocks_;
    }
    /**
     * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> 
        getBlocksOrBuilderList() {
      return blocks_;
    }
    /**
     * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
     */
    @java.lang.Override
    public int getBlocksCount() {
      return blocks_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) {
      return blocks_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
        int index) {
      return blocks_.get(index);
    }

    public static final int UNDERCONSTRUCTION_FIELD_NUMBER = 3;
    private boolean underConstruction_ = false;
    /**
     * <code>required bool underConstruction = 3;</code>
     * @return Whether the underConstruction field is set.
     */
    @java.lang.Override
    public boolean hasUnderConstruction() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required bool underConstruction = 3;</code>
     * @return The underConstruction.
     */
    @java.lang.Override
    public boolean getUnderConstruction() {
      return underConstruction_;
    }

    public static final int LASTBLOCK_FIELD_NUMBER = 4;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto lastBlock_;
    /**
     * <code>optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;</code>
     * @return Whether the lastBlock field is set.
     */
    @java.lang.Override
    public boolean hasLastBlock() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;</code>
     * @return The lastBlock.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock() {
      return lastBlock_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance() : lastBlock_;
    }
    /**
     * <code>optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder() {
      return lastBlock_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance() : lastBlock_;
    }

    public static final int ISLASTBLOCKCOMPLETE_FIELD_NUMBER = 5;
    private boolean isLastBlockComplete_ = false;
    /**
     * <code>required bool isLastBlockComplete = 5;</code>
     * @return Whether the isLastBlockComplete field is set.
     */
    @java.lang.Override
    public boolean hasIsLastBlockComplete() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>required bool isLastBlockComplete = 5;</code>
     * @return The isLastBlockComplete.
     */
    @java.lang.Override
    public boolean getIsLastBlockComplete() {
      return isLastBlockComplete_;
    }

    public static final int FILEENCRYPTIONINFO_FIELD_NUMBER = 6;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_;
    /**
     * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;</code>
     * @return Whether the fileEncryptionInfo field is set.
     */
    @java.lang.Override
    public boolean hasFileEncryptionInfo() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;</code>
     * @return The fileEncryptionInfo.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() {
      return fileEncryptionInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_;
    }
    /**
     * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() {
      return fileEncryptionInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_;
    }

    public static final int ECPOLICY_FIELD_NUMBER = 7;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_;
    /**
     * <pre>
     * Optional field for erasure coding
     * </pre>
     *
     * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;</code>
     * @return Whether the ecPolicy field is set.
     */
    @java.lang.Override
    public boolean hasEcPolicy() {
      return ((bitField0_ & 0x00000020) != 0);
    }
    /**
     * <pre>
     * Optional field for erasure coding
     * </pre>
     *
     * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;</code>
     * @return The ecPolicy.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() {
      return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_;
    }
    /**
     * <pre>
     * Optional field for erasure coding
     * </pre>
     *
     * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() {
      return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasFileLength()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasUnderConstruction()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasIsLastBlockComplete()) {
        memoizedIsInitialized = 0;
        return false;
      }
      for (int i = 0; i < getBlocksCount(); i++) {
        if (!getBlocks(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasLastBlock()) {
        if (!getLastBlock().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasFileEncryptionInfo()) {
        if (!getFileEncryptionInfo().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasEcPolicy()) {
        if (!getEcPolicy().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt64(1, fileLength_);
      }
      for (int i = 0; i < blocks_.size(); i++) {
        output.writeMessage(2, blocks_.get(i));
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeBool(3, underConstruction_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeMessage(4, getLastBlock());
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeBool(5, isLastBlockComplete_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeMessage(6, getFileEncryptionInfo());
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        output.writeMessage(7, getEcPolicy());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(1, fileLength_);
      }
      for (int i = 0; i < blocks_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(2, blocks_.get(i));
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(3, underConstruction_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(4, getLastBlock());
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(5, isLastBlockComplete_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(6, getFileEncryptionInfo());
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(7, getEcPolicy());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto) obj;

      if (hasFileLength() != other.hasFileLength()) return false;
      if (hasFileLength()) {
        if (getFileLength()
            != other.getFileLength()) return false;
      }
      if (!getBlocksList()
          .equals(other.getBlocksList())) return false;
      if (hasUnderConstruction() != other.hasUnderConstruction()) return false;
      if (hasUnderConstruction()) {
        if (getUnderConstruction()
            != other.getUnderConstruction()) return false;
      }
      if (hasLastBlock() != other.hasLastBlock()) return false;
      if (hasLastBlock()) {
        if (!getLastBlock()
            .equals(other.getLastBlock())) return false;
      }
      if (hasIsLastBlockComplete() != other.hasIsLastBlockComplete()) return false;
      if (hasIsLastBlockComplete()) {
        if (getIsLastBlockComplete()
            != other.getIsLastBlockComplete()) return false;
      }
      if (hasFileEncryptionInfo() != other.hasFileEncryptionInfo()) return false;
      if (hasFileEncryptionInfo()) {
        if (!getFileEncryptionInfo()
            .equals(other.getFileEncryptionInfo())) return false;
      }
      if (hasEcPolicy() != other.hasEcPolicy()) return false;
      if (hasEcPolicy()) {
        if (!getEcPolicy()
            .equals(other.getEcPolicy())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasFileLength()) {
        hash = (37 * hash) + FILELENGTH_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getFileLength());
      }
      if (getBlocksCount() > 0) {
        hash = (37 * hash) + BLOCKS_FIELD_NUMBER;
        hash = (53 * hash) + getBlocksList().hashCode();
      }
      if (hasUnderConstruction()) {
        hash = (37 * hash) + UNDERCONSTRUCTION_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getUnderConstruction());
      }
      if (hasLastBlock()) {
        hash = (37 * hash) + LASTBLOCK_FIELD_NUMBER;
        hash = (53 * hash) + getLastBlock().hashCode();
      }
      if (hasIsLastBlockComplete()) {
        hash = (37 * hash) + ISLASTBLOCKCOMPLETE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getIsLastBlockComplete());
      }
      if (hasFileEncryptionInfo()) {
        hash = (37 * hash) + FILEENCRYPTIONINFO_FIELD_NUMBER;
        hash = (53 * hash) + getFileEncryptionInfo().hashCode();
      }
      if (hasEcPolicy()) {
        hash = (37 * hash) + ECPOLICY_FIELD_NUMBER;
        hash = (53 * hash) + getEcPolicy().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
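
    // Illustrative usage sketch (hand-written, not protoc output): the delimited
    // variants above prefix each message with its length, so multiple
    // LocatedBlocksProto messages can share one stream. This round-trips a
    // minimal message (only the three required fields set) through memory.
    private static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto
        delimitedRoundTripExample() throws java.io.IOException {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto msg = newBuilder()
          .setFileLength(0L)
          .setUnderConstruction(false)
          .setIsLastBlockComplete(true)
          .build();
      java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
      msg.writeDelimitedTo(out);  // length-prefixed write matching parseDelimitedFrom
      return parseDelimitedFrom(new java.io.ByteArrayInputStream(out.toByteArray()));
    }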

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * A set of file blocks and their locations.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.LocatedBlocksProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.LocatedBlocksProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
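        // alwaysUseFieldBuilders is a protobuf test hook; when enabled, nested
        // field builders are created eagerly here instead of lazily on first use.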
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getBlocksFieldBuilder();
          getLastBlockFieldBuilder();
          getFileEncryptionInfoFieldBuilder();
          getEcPolicyFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        fileLength_ = 0L;
        if (blocksBuilder_ == null) {
          blocks_ = java.util.Collections.emptyList();
        } else {
          blocks_ = null;
          blocksBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        underConstruction_ = false;
        lastBlock_ = null;
        if (lastBlockBuilder_ != null) {
          lastBlockBuilder_.dispose();
          lastBlockBuilder_ = null;
        }
        isLastBlockComplete_ = false;
        fileEncryptionInfo_ = null;
        if (fileEncryptionInfoBuilder_ != null) {
          fileEncryptionInfoBuilder_.dispose();
          fileEncryptionInfoBuilder_ = null;
        }
        ecPolicy_ = null;
        if (ecPolicyBuilder_ != null) {
          ecPolicyBuilder_.dispose();
          ecPolicyBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result) {
        if (blocksBuilder_ == null) {
          if (((bitField0_ & 0x00000002) != 0)) {
            blocks_ = java.util.Collections.unmodifiableList(blocks_);
            bitField0_ = (bitField0_ & ~0x00000002);
          }
          result.blocks_ = blocks_;
        } else {
          result.blocks_ = blocksBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto result) {
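        // Note the bit remapping: in the builder, bit 0x02 tracks the mutable
        // blocks list, so the remaining fields map to one-lower bits in the
        // message's bitField0_ (which has no has-bit for the repeated field).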
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.fileLength_ = fileLength_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.underConstruction_ = underConstruction_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.lastBlock_ = lastBlockBuilder_ == null
              ? lastBlock_
              : lastBlockBuilder_.build();
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.isLastBlockComplete_ = isLastBlockComplete_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.fileEncryptionInfo_ = fileEncryptionInfoBuilder_ == null
              ? fileEncryptionInfo_
              : fileEncryptionInfoBuilder_.build();
          to_bitField0_ |= 0x00000010;
        }
        if (((from_bitField0_ & 0x00000040) != 0)) {
          result.ecPolicy_ = ecPolicyBuilder_ == null
              ? ecPolicy_
              : ecPolicyBuilder_.build();
          to_bitField0_ |= 0x00000020;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance()) return this;
        if (other.hasFileLength()) {
          setFileLength(other.getFileLength());
        }
        if (blocksBuilder_ == null) {
          if (!other.blocks_.isEmpty()) {
            if (blocks_.isEmpty()) {
              blocks_ = other.blocks_;
              bitField0_ = (bitField0_ & ~0x00000002);
            } else {
              ensureBlocksIsMutable();
              blocks_.addAll(other.blocks_);
            }
            onChanged();
          }
        } else {
          if (!other.blocks_.isEmpty()) {
            if (blocksBuilder_.isEmpty()) {
              blocksBuilder_.dispose();
              blocksBuilder_ = null;
              blocks_ = other.blocks_;
              bitField0_ = (bitField0_ & ~0x00000002);
              blocksBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getBlocksFieldBuilder() : null;
            } else {
              blocksBuilder_.addAllMessages(other.blocks_);
            }
          }
        }
        if (other.hasUnderConstruction()) {
          setUnderConstruction(other.getUnderConstruction());
        }
        if (other.hasLastBlock()) {
          mergeLastBlock(other.getLastBlock());
        }
        if (other.hasIsLastBlockComplete()) {
          setIsLastBlockComplete(other.getIsLastBlockComplete());
        }
        if (other.hasFileEncryptionInfo()) {
          mergeFileEncryptionInfo(other.getFileEncryptionInfo());
        }
        if (other.hasEcPolicy()) {
          mergeEcPolicy(other.getEcPolicy());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasFileLength()) {
          return false;
        }
        if (!hasUnderConstruction()) {
          return false;
        }
        if (!hasIsLastBlockComplete()) {
          return false;
        }
        for (int i = 0; i < getBlocksCount(); i++) {
          if (!getBlocks(i).isInitialized()) {
            return false;
          }
        }
        if (hasLastBlock()) {
          if (!getLastBlock().isInitialized()) {
            return false;
          }
        }
        if (hasFileEncryptionInfo()) {
          if (!getFileEncryptionInfo().isInitialized()) {
            return false;
          }
        }
        if (hasEcPolicy()) {
          if (!getEcPolicy().isInitialized()) {
            return false;
          }
        }
        return true;
      }
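      // fileLength, underConstruction and isLastBlockComplete are proto2 required
      // fields: isInitialized() stays false until all three are set and every
      // populated sub-message is itself initialized. build() turns that into an
      // UninitializedMessageException, while buildPartial() skips the check.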

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                fileLength_ = input.readUInt64();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 18: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.PARSER,
                        extensionRegistry);
                if (blocksBuilder_ == null) {
                  ensureBlocksIsMutable();
                  blocks_.add(m);
                } else {
                  blocksBuilder_.addMessage(m);
                }
                break;
              } // case 18
              case 24: {
                underConstruction_ = input.readBool();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 34: {
                input.readMessage(
                    getLastBlockFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000008;
                break;
              } // case 34
              case 40: {
                isLastBlockComplete_ = input.readBool();
                bitField0_ |= 0x00000010;
                break;
              } // case 40
              case 50: {
                input.readMessage(
                    getFileEncryptionInfoFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000020;
                break;
              } // case 50
              case 58: {
                input.readMessage(
                    getEcPolicyFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000040;
                break;
              } // case 58
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
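      // Each case label above is a protobuf wire tag, (fieldNumber << 3) | wireType:
      // fileLength is field 1 as a varint (tag 8), blocks field 2 as a
      // length-delimited message (tag 18), underConstruction field 3 (tag 24),
      // lastBlock field 4 (tag 34), isLastBlockComplete field 5 (tag 40),
      // fileEncryptionInfo field 6 (tag 50), ecPolicy field 7 (tag 58); tag 0
      // signals end of input.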
      private int bitField0_;

      private long fileLength_ ;
      /**
       * <code>required uint64 fileLength = 1;</code>
       * @return Whether the fileLength field is set.
       */
      @java.lang.Override
      public boolean hasFileLength() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required uint64 fileLength = 1;</code>
       * @return The fileLength.
       */
      @java.lang.Override
      public long getFileLength() {
        return fileLength_;
      }
      /**
       * <code>required uint64 fileLength = 1;</code>
       * @param value The fileLength to set.
       * @return This builder for chaining.
       */
      public Builder setFileLength(long value) {

        fileLength_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 fileLength = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearFileLength() {
        bitField0_ = (bitField0_ & ~0x00000001);
        fileLength_ = 0L;
        onChanged();
        return this;
      }

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> blocks_ =
        java.util.Collections.emptyList();
      private void ensureBlocksIsMutable() {
        if (!((bitField0_ & 0x00000002) != 0)) {
          blocks_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto>(blocks_);
          bitField0_ |= 0x00000002;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> blocksBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> getBlocksList() {
        if (blocksBuilder_ == null) {
          return java.util.Collections.unmodifiableList(blocks_);
        } else {
          return blocksBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
       */
      public int getBlocksCount() {
        if (blocksBuilder_ == null) {
          return blocks_.size();
        } else {
          return blocksBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getBlocks(int index) {
        if (blocksBuilder_ == null) {
          return blocks_.get(index);
        } else {
          return blocksBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
       */
      public Builder setBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlocksIsMutable();
          blocks_.set(index, value);
          onChanged();
        } else {
          blocksBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
       */
      public Builder setBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.set(index, builderForValue.build());
          onChanged();
        } else {
          blocksBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
       */
      public Builder addBlocks(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlocksIsMutable();
          blocks_.add(value);
          onChanged();
        } else {
          blocksBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
       */
      public Builder addBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
        if (blocksBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlocksIsMutable();
          blocks_.add(index, value);
          onChanged();
        } else {
          blocksBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
       */
      public Builder addBlocks(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.add(builderForValue.build());
          onChanged();
        } else {
          blocksBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
       */
      public Builder addBlocks(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.add(index, builderForValue.build());
          onChanged();
        } else {
          blocksBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
       */
      public Builder addAllBlocks(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto> values) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, blocks_);
          onChanged();
        } else {
          blocksBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
       */
      public Builder clearBlocks() {
        if (blocksBuilder_ == null) {
          blocks_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000002);
          onChanged();
        } else {
          blocksBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
       */
      public Builder removeBlocks(int index) {
        if (blocksBuilder_ == null) {
          ensureBlocksIsMutable();
          blocks_.remove(index);
          onChanged();
        } else {
          blocksBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getBlocksBuilder(
          int index) {
        return getBlocksFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getBlocksOrBuilder(
          int index) {
        if (blocksBuilder_ == null) {
          return blocks_.get(index);
        } else {
          return blocksBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> 
           getBlocksOrBuilderList() {
        if (blocksBuilder_ != null) {
          return blocksBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(blocks_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder() {
        return getBlocksFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder addBlocksBuilder(
          int index) {
        return getBlocksFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.LocatedBlockProto blocks = 2;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder> 
           getBlocksBuilderList() {
        return getBlocksFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> 
          getBlocksFieldBuilder() {
        if (blocksBuilder_ == null) {
          blocksBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>(
                  blocks_,
                  ((bitField0_ & 0x00000002) != 0),
                  getParentForChildren(),
                  isClean());
          blocks_ = null;
        }
        return blocksBuilder_;
      }
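      // The repeated blocks field lives in a plain ArrayList until one of the
      // builder-view accessors (getBlocksBuilder, addBlocksBuilder,
      // getBlocksBuilderList) is first used; getBlocksFieldBuilder() then wraps the
      // list in a RepeatedFieldBuilderV3, nulls out blocks_, and every later
      // mutation goes through the field builder instead.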

      private boolean underConstruction_ ;
      /**
       * <code>required bool underConstruction = 3;</code>
       * @return Whether the underConstruction field is set.
       */
      @java.lang.Override
      public boolean hasUnderConstruction() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required bool underConstruction = 3;</code>
       * @return The underConstruction.
       */
      @java.lang.Override
      public boolean getUnderConstruction() {
        return underConstruction_;
      }
      /**
       * <code>required bool underConstruction = 3;</code>
       * @param value The underConstruction to set.
       * @return This builder for chaining.
       */
      public Builder setUnderConstruction(boolean value) {

        underConstruction_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required bool underConstruction = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearUnderConstruction() {
        bitField0_ = (bitField0_ & ~0x00000004);
        underConstruction_ = false;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto lastBlock_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> lastBlockBuilder_;
      /**
       * <code>optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;</code>
       * @return Whether the lastBlock field is set.
       */
      public boolean hasLastBlock() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;</code>
       * @return The lastBlock.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto getLastBlock() {
        if (lastBlockBuilder_ == null) {
          return lastBlock_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance() : lastBlock_;
        } else {
          return lastBlockBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;</code>
       */
      public Builder setLastBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
        if (lastBlockBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          lastBlock_ = value;
        } else {
          lastBlockBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;</code>
       */
      public Builder setLastBlock(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder builderForValue) {
        if (lastBlockBuilder_ == null) {
          lastBlock_ = builderForValue.build();
        } else {
          lastBlockBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;</code>
       */
      public Builder mergeLastBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto value) {
        if (lastBlockBuilder_ == null) {
          if (((bitField0_ & 0x00000008) != 0) &&
            lastBlock_ != null &&
            lastBlock_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance()) {
            getLastBlockBuilder().mergeFrom(value);
          } else {
            lastBlock_ = value;
          }
        } else {
          lastBlockBuilder_.mergeFrom(value);
        }
        if (lastBlock_ != null) {
          bitField0_ |= 0x00000008;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;</code>
       */
      public Builder clearLastBlock() {
        bitField0_ = (bitField0_ & ~0x00000008);
        lastBlock_ = null;
        if (lastBlockBuilder_ != null) {
          lastBlockBuilder_.dispose();
          lastBlockBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder getLastBlockBuilder() {
        bitField0_ |= 0x00000008;
        onChanged();
        return getLastBlockFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder getLastBlockOrBuilder() {
        if (lastBlockBuilder_ != null) {
          return lastBlockBuilder_.getMessageOrBuilder();
        } else {
          return lastBlock_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance() : lastBlock_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.LocatedBlockProto lastBlock = 4;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder> 
          getLastBlockFieldBuilder() {
        if (lastBlockBuilder_ == null) {
          lastBlockBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProtoOrBuilder>(
                  getLastBlock(),
                  getParentForChildren(),
                  isClean());
          lastBlock_ = null;
        }
        return lastBlockBuilder_;
      }

      private boolean isLastBlockComplete_ ;
      /**
       * <code>required bool isLastBlockComplete = 5;</code>
       * @return Whether the isLastBlockComplete field is set.
       */
      @java.lang.Override
      public boolean hasIsLastBlockComplete() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>required bool isLastBlockComplete = 5;</code>
       * @return The isLastBlockComplete.
       */
      @java.lang.Override
      public boolean getIsLastBlockComplete() {
        return isLastBlockComplete_;
      }
      /**
       * <code>required bool isLastBlockComplete = 5;</code>
       * @param value The isLastBlockComplete to set.
       * @return This builder for chaining.
       */
      public Builder setIsLastBlockComplete(boolean value) {

        isLastBlockComplete_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>required bool isLastBlockComplete = 5;</code>
       * @return This builder for chaining.
       */
      public Builder clearIsLastBlockComplete() {
        bitField0_ = (bitField0_ & ~0x00000010);
        isLastBlockComplete_ = false;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder> fileEncryptionInfoBuilder_;
      /**
       * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;</code>
       * @return Whether the fileEncryptionInfo field is set.
       */
      public boolean hasFileEncryptionInfo() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;</code>
       * @return The fileEncryptionInfo.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() {
        if (fileEncryptionInfoBuilder_ == null) {
          return fileEncryptionInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_;
        } else {
          return fileEncryptionInfoBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;</code>
       */
      public Builder setFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) {
        if (fileEncryptionInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          fileEncryptionInfo_ = value;
        } else {
          fileEncryptionInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;</code>
       */
      public Builder setFileEncryptionInfo(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder builderForValue) {
        if (fileEncryptionInfoBuilder_ == null) {
          fileEncryptionInfo_ = builderForValue.build();
        } else {
          fileEncryptionInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;</code>
       */
      public Builder mergeFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) {
        if (fileEncryptionInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000020) != 0) &&
            fileEncryptionInfo_ != null &&
            fileEncryptionInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance()) {
            getFileEncryptionInfoBuilder().mergeFrom(value);
          } else {
            fileEncryptionInfo_ = value;
          }
        } else {
          fileEncryptionInfoBuilder_.mergeFrom(value);
        }
        if (fileEncryptionInfo_ != null) {
          bitField0_ |= 0x00000020;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;</code>
       */
      public Builder clearFileEncryptionInfo() {
        bitField0_ = (bitField0_ & ~0x00000020);
        fileEncryptionInfo_ = null;
        if (fileEncryptionInfoBuilder_ != null) {
          fileEncryptionInfoBuilder_.dispose();
          fileEncryptionInfoBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder getFileEncryptionInfoBuilder() {
        bitField0_ |= 0x00000020;
        onChanged();
        return getFileEncryptionInfoFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() {
        if (fileEncryptionInfoBuilder_ != null) {
          return fileEncryptionInfoBuilder_.getMessageOrBuilder();
        } else {
          return fileEncryptionInfo_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 6;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder> 
          getFileEncryptionInfoFieldBuilder() {
        if (fileEncryptionInfoBuilder_ == null) {
          fileEncryptionInfoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder>(
                  getFileEncryptionInfo(),
                  getParentForChildren(),
                  isClean());
          fileEncryptionInfo_ = null;
        }
        return fileEncryptionInfoBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> ecPolicyBuilder_;
      /**
       * <pre>
       * Optional field for erasure coding
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;</code>
       * @return Whether the ecPolicy field is set.
       */
      public boolean hasEcPolicy() {
        return ((bitField0_ & 0x00000040) != 0);
      }
      /**
       * <pre>
       * Optional field for erasure coding
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;</code>
       * @return The ecPolicy.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() {
        if (ecPolicyBuilder_ == null) {
          return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_;
        } else {
          return ecPolicyBuilder_.getMessage();
        }
      }
      /**
       * <pre>
       * Optional field for erasure coding
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;</code>
       */
      public Builder setEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
        if (ecPolicyBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ecPolicy_ = value;
        } else {
          ecPolicyBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Optional field for erasure coding
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;</code>
       */
      public Builder setEcPolicy(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) {
        if (ecPolicyBuilder_ == null) {
          ecPolicy_ = builderForValue.build();
        } else {
          ecPolicyBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Optional field for erasure coding
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;</code>
       */
      public Builder mergeEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
        if (ecPolicyBuilder_ == null) {
          if (((bitField0_ & 0x00000040) != 0) &&
            ecPolicy_ != null &&
            ecPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()) {
            getEcPolicyBuilder().mergeFrom(value);
          } else {
            ecPolicy_ = value;
          }
        } else {
          ecPolicyBuilder_.mergeFrom(value);
        }
        if (ecPolicy_ != null) {
          bitField0_ |= 0x00000040;
          onChanged();
        }
        return this;
      }
      /**
       * <pre>
       * Optional field for erasure coding
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;</code>
       */
      public Builder clearEcPolicy() {
        bitField0_ = (bitField0_ & ~0x00000040);
        ecPolicy_ = null;
        if (ecPolicyBuilder_ != null) {
          ecPolicyBuilder_.dispose();
          ecPolicyBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Optional field for erasure coding
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder getEcPolicyBuilder() {
        bitField0_ |= 0x00000040;
        onChanged();
        return getEcPolicyFieldBuilder().getBuilder();
      }
      /**
       * <pre>
       * Optional field for erasure coding
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() {
        if (ecPolicyBuilder_ != null) {
          return ecPolicyBuilder_.getMessageOrBuilder();
        } else {
          return ecPolicy_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_;
        }
      }
      /**
       * <pre>
       * Optional field for erasure coding
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 7;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> 
          getEcPolicyFieldBuilder() {
        if (ecPolicyBuilder_ == null) {
          ecPolicyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>(
                  getEcPolicy(),
                  getParentForChildren(),
                  isClean());
          ecPolicy_ = null;
        }
        return ecPolicyBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.LocatedBlocksProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.LocatedBlocksProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<LocatedBlocksProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<LocatedBlocksProto>() {
      @java.lang.Override
      public LocatedBlocksProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<LocatedBlocksProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<LocatedBlocksProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
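
  /**
   * Illustrative only, not part of the generated protocol: a minimal sketch of the
   * LocatedBlocksProto builder API for an empty, fully-closed file. The literal
   * values are placeholders; real callers also populate blocks and lastBlock.
   */
  @SuppressWarnings("unused")
  private static LocatedBlocksProto exampleLocatedBlocksProto() {
    return LocatedBlocksProto.newBuilder()
        .setFileLength(0L)             // required uint64 fileLength = 1
        .setUnderConstruction(false)   // required bool underConstruction = 3
        .setIsLastBlockComplete(true)  // required bool isLastBlockComplete = 5
        .build();                      // throws if a required field were left unset
  }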

  public interface ECSchemaOptionEntryProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ECSchemaOptionEntryProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required string key = 1;</code>
     * @return Whether the key field is set.
     */
    boolean hasKey();
    /**
     * <code>required string key = 1;</code>
     * @return The key.
     */
    java.lang.String getKey();
    /**
     * <code>required string key = 1;</code>
     * @return The bytes for key.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getKeyBytes();

    /**
     * <code>required string value = 2;</code>
     * @return Whether the value field is set.
     */
    boolean hasValue();
    /**
     * <code>required string value = 2;</code>
     * @return The value.
     */
    java.lang.String getValue();
    /**
     * <code>required string value = 2;</code>
     * @return The bytes for value.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getValueBytes();
  }
  /**
   * <pre>
   **
   * ECSchema options entry
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.ECSchemaOptionEntryProto}
   */
  public static final class ECSchemaOptionEntryProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.ECSchemaOptionEntryProto)
      ECSchemaOptionEntryProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use ECSchemaOptionEntryProto.newBuilder() to construct.
    private ECSchemaOptionEntryProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ECSchemaOptionEntryProto() {
      key_ = "";
      value_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ECSchemaOptionEntryProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder.class);
    }

    private int bitField0_;
    public static final int KEY_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object key_ = "";
    /**
     * <code>required string key = 1;</code>
     * @return Whether the key field is set.
     */
    @java.lang.Override
    public boolean hasKey() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required string key = 1;</code>
     * @return The key.
     */
    @java.lang.Override
    public java.lang.String getKey() {
      java.lang.Object ref = key_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          key_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string key = 1;</code>
     * @return The bytes for key.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getKeyBytes() {
      java.lang.Object ref = key_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        key_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int VALUE_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object value_ = "";
    /**
     * <code>required string value = 2;</code>
     * @return Whether the value field is set.
     */
    @java.lang.Override
    public boolean hasValue() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required string value = 2;</code>
     * @return The value.
     */
    @java.lang.Override
    public java.lang.String getValue() {
      java.lang.Object ref = value_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          value_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string value = 2;</code>
     * @return The bytes for value.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getValueBytes() {
      java.lang.Object ref = value_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        value_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }
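    // key_ and value_ are stored as either a String or a ByteString; the accessors
    // above convert lazily in whichever direction is needed and, when the bytes are
    // valid UTF-8, cache the converted form back so repeated calls do not re-decode.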

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasKey()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasValue()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, key_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, value_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, key_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, value_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto) obj;

      if (hasKey() != other.hasKey()) return false;
      if (hasKey()) {
        if (!getKey()
            .equals(other.getKey())) return false;
      }
      if (hasValue() != other.hasValue()) return false;
      if (hasValue()) {
        if (!getValue()
            .equals(other.getValue())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasKey()) {
        hash = (37 * hash) + KEY_FIELD_NUMBER;
        hash = (53 * hash) + getKey().hashCode();
      }
      if (hasValue()) {
        hash = (37 * hash) + VALUE_FIELD_NUMBER;
        hash = (53 * hash) + getValue().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * ECSchema options entry
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.ECSchemaOptionEntryProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ECSchemaOptionEntryProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        key_ = "";
        value_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.key_ = key_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.value_ = value_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.getDefaultInstance()) return this;
        if (other.hasKey()) {
          key_ = other.key_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasValue()) {
          value_ = other.value_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasKey()) {
          return false;
        }
        if (!hasValue()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                key_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                value_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object key_ = "";
      /**
       * <code>required string key = 1;</code>
       * @return Whether the key field is set.
       */
      public boolean hasKey() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required string key = 1;</code>
       * @return The key.
       */
      public java.lang.String getKey() {
        java.lang.Object ref = key_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            key_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string key = 1;</code>
       * @return The bytes for key.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getKeyBytes() {
        java.lang.Object ref = key_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          key_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string key = 1;</code>
       * @param value The key to set.
       * @return This builder for chaining.
       */
      public Builder setKey(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        key_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required string key = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearKey() {
        key_ = getDefaultInstance().getKey();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>required string key = 1;</code>
       * @param value The bytes for key to set.
       * @return This builder for chaining.
       */
      public Builder setKeyBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        key_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private java.lang.Object value_ = "";
      /**
       * <code>required string value = 2;</code>
       * @return Whether the value field is set.
       */
      public boolean hasValue() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required string value = 2;</code>
       * @return The value.
       */
      public java.lang.String getValue() {
        java.lang.Object ref = value_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            value_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string value = 2;</code>
       * @return The bytes for value.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getValueBytes() {
        java.lang.Object ref = value_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          value_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string value = 2;</code>
       * @param value The value to set.
       * @return This builder for chaining.
       */
      public Builder setValue(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        value_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required string value = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearValue() {
        value_ = getDefaultInstance().getValue();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <code>required string value = 2;</code>
       * @param value The bytes for value to set.
       * @return This builder for chaining.
       */
      public Builder setValueBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        value_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ECSchemaOptionEntryProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ECSchemaOptionEntryProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ECSchemaOptionEntryProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ECSchemaOptionEntryProto>() {
      @java.lang.Override
      public ECSchemaOptionEntryProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ECSchemaOptionEntryProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ECSchemaOptionEntryProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
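
  // Hand-written example (not part of the protoc output): a minimal sketch of how the
  // generated ECSchemaOptionEntryProto API above is typically used. It builds a key/value
  // entry, serializes it to the protobuf wire format, and parses it back. The literal key
  // and value strings are illustrative placeholders, not values defined in hdfs.proto.
  static ECSchemaOptionEntryProto exampleOptionEntryRoundTrip()
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    ECSchemaOptionEntryProto entry = ECSchemaOptionEntryProto.newBuilder()
        .setKey("example-option")      // required string key = 1 (illustrative value)
        .setValue("example-value")     // required string value = 2 (illustrative value)
        .build();                      // build() throws if either required field is unset
    byte[] wire = entry.toByteArray(); // serialize via the generated writeTo(...) logic
    return ECSchemaOptionEntryProto.parseFrom(wire); // re-parse through the generated PARSER
  }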

  public interface ECSchemaProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ECSchemaProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required string codecName = 1;</code>
     * @return Whether the codecName field is set.
     */
    boolean hasCodecName();
    /**
     * <code>required string codecName = 1;</code>
     * @return The codecName.
     */
    java.lang.String getCodecName();
    /**
     * <code>required string codecName = 1;</code>
     * @return The bytes for codecName.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getCodecNameBytes();

    /**
     * <code>required uint32 dataUnits = 2;</code>
     * @return Whether the dataUnits field is set.
     */
    boolean hasDataUnits();
    /**
     * <code>required uint32 dataUnits = 2;</code>
     * @return The dataUnits.
     */
    int getDataUnits();

    /**
     * <code>required uint32 parityUnits = 3;</code>
     * @return Whether the parityUnits field is set.
     */
    boolean hasParityUnits();
    /**
     * <code>required uint32 parityUnits = 3;</code>
     * @return The parityUnits.
     */
    int getParityUnits();

    /**
     * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto> 
        getOptionsList();
    /**
     * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getOptions(int index);
    /**
     * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
     */
    int getOptionsCount();
    /**
     * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder> 
        getOptionsOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder getOptionsOrBuilder(
        int index);
  }
  /**
   * <pre>
   **
   * ECSchema for erasure coding
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.ECSchemaProto}
   */
  public static final class ECSchemaProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.ECSchemaProto)
      ECSchemaProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use ECSchemaProto.newBuilder() to construct.
    private ECSchemaProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ECSchemaProto() {
      codecName_ = "";
      options_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ECSchemaProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder.class);
    }

    private int bitField0_;
    public static final int CODECNAME_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object codecName_ = "";
    /**
     * <code>required string codecName = 1;</code>
     * @return Whether the codecName field is set.
     */
    @java.lang.Override
    public boolean hasCodecName() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required string codecName = 1;</code>
     * @return The codecName.
     */
    @java.lang.Override
    public java.lang.String getCodecName() {
      java.lang.Object ref = codecName_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          codecName_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string codecName = 1;</code>
     * @return The bytes for codecName.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getCodecNameBytes() {
      java.lang.Object ref = codecName_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        codecName_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int DATAUNITS_FIELD_NUMBER = 2;
    private int dataUnits_ = 0;
    /**
     * <code>required uint32 dataUnits = 2;</code>
     * @return Whether the dataUnits field is set.
     */
    @java.lang.Override
    public boolean hasDataUnits() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required uint32 dataUnits = 2;</code>
     * @return The dataUnits.
     */
    @java.lang.Override
    public int getDataUnits() {
      return dataUnits_;
    }

    public static final int PARITYUNITS_FIELD_NUMBER = 3;
    private int parityUnits_ = 0;
    /**
     * <code>required uint32 parityUnits = 3;</code>
     * @return Whether the parityUnits field is set.
     */
    @java.lang.Override
    public boolean hasParityUnits() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required uint32 parityUnits = 3;</code>
     * @return The parityUnits.
     */
    @java.lang.Override
    public int getParityUnits() {
      return parityUnits_;
    }

    public static final int OPTIONS_FIELD_NUMBER = 4;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto> options_;
    /**
     * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto> getOptionsList() {
      return options_;
    }
    /**
     * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder> 
        getOptionsOrBuilderList() {
      return options_;
    }
    /**
     * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
     */
    @java.lang.Override
    public int getOptionsCount() {
      return options_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getOptions(int index) {
      return options_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder getOptionsOrBuilder(
        int index) {
      return options_.get(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasCodecName()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasDataUnits()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasParityUnits()) {
        memoizedIsInitialized = 0;
        return false;
      }
      for (int i = 0; i < getOptionsCount(); i++) {
        if (!getOptions(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, codecName_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt32(2, dataUnits_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeUInt32(3, parityUnits_);
      }
      for (int i = 0; i < options_.size(); i++) {
        output.writeMessage(4, options_.get(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, codecName_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(2, dataUnits_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(3, parityUnits_);
      }
      for (int i = 0; i < options_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(4, options_.get(i));
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto) obj;

      if (hasCodecName() != other.hasCodecName()) return false;
      if (hasCodecName()) {
        if (!getCodecName()
            .equals(other.getCodecName())) return false;
      }
      if (hasDataUnits() != other.hasDataUnits()) return false;
      if (hasDataUnits()) {
        if (getDataUnits()
            != other.getDataUnits()) return false;
      }
      if (hasParityUnits() != other.hasParityUnits()) return false;
      if (hasParityUnits()) {
        if (getParityUnits()
            != other.getParityUnits()) return false;
      }
      if (!getOptionsList()
          .equals(other.getOptionsList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasCodecName()) {
        hash = (37 * hash) + CODECNAME_FIELD_NUMBER;
        hash = (53 * hash) + getCodecName().hashCode();
      }
      if (hasDataUnits()) {
        hash = (37 * hash) + DATAUNITS_FIELD_NUMBER;
        hash = (53 * hash) + getDataUnits();
      }
      if (hasParityUnits()) {
        hash = (37 * hash) + PARITYUNITS_FIELD_NUMBER;
        hash = (53 * hash) + getParityUnits();
      }
      if (getOptionsCount() > 0) {
        hash = (37 * hash) + OPTIONS_FIELD_NUMBER;
        hash = (53 * hash) + getOptionsList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * ECSchema for erasure coding
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.ECSchemaProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ECSchemaProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        codecName_ = "";
        dataUnits_ = 0;
        parityUnits_ = 0;
        if (optionsBuilder_ == null) {
          options_ = java.util.Collections.emptyList();
        } else {
          options_ = null;
          optionsBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000008);
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECSchemaProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto result) {
        if (optionsBuilder_ == null) {
          if (((bitField0_ & 0x00000008) != 0)) {
            options_ = java.util.Collections.unmodifiableList(options_);
            bitField0_ = (bitField0_ & ~0x00000008);
          }
          result.options_ = options_;
        } else {
          result.options_ = optionsBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.codecName_ = codecName_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.dataUnits_ = dataUnits_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.parityUnits_ = parityUnits_;
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance()) return this;
        if (other.hasCodecName()) {
          codecName_ = other.codecName_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasDataUnits()) {
          setDataUnits(other.getDataUnits());
        }
        if (other.hasParityUnits()) {
          setParityUnits(other.getParityUnits());
        }
        if (optionsBuilder_ == null) {
          if (!other.options_.isEmpty()) {
            if (options_.isEmpty()) {
              options_ = other.options_;
              bitField0_ = (bitField0_ & ~0x00000008);
            } else {
              ensureOptionsIsMutable();
              options_.addAll(other.options_);
            }
            onChanged();
          }
        } else {
          if (!other.options_.isEmpty()) {
            if (optionsBuilder_.isEmpty()) {
              optionsBuilder_.dispose();
              optionsBuilder_ = null;
              options_ = other.options_;
              bitField0_ = (bitField0_ & ~0x00000008);
              optionsBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getOptionsFieldBuilder() : null;
            } else {
              optionsBuilder_.addAllMessages(other.options_);
            }
          }
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasCodecName()) {
          return false;
        }
        if (!hasDataUnits()) {
          return false;
        }
        if (!hasParityUnits()) {
          return false;
        }
        for (int i = 0; i < getOptionsCount(); i++) {
          if (!getOptions(i).isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                codecName_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                dataUnits_ = input.readUInt32();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                parityUnits_ = input.readUInt32();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 34: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.PARSER,
                        extensionRegistry);
                if (optionsBuilder_ == null) {
                  ensureOptionsIsMutable();
                  options_.add(m);
                } else {
                  optionsBuilder_.addMessage(m);
                }
                break;
              } // case 34
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object codecName_ = "";
      /**
       * <code>required string codecName = 1;</code>
       * @return Whether the codecName field is set.
       */
      public boolean hasCodecName() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required string codecName = 1;</code>
       * @return The codecName.
       */
      public java.lang.String getCodecName() {
        java.lang.Object ref = codecName_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            codecName_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string codecName = 1;</code>
       * @return The bytes for codecName.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getCodecNameBytes() {
        java.lang.Object ref = codecName_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          codecName_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string codecName = 1;</code>
       * @param value The codecName to set.
       * @return This builder for chaining.
       */
      public Builder setCodecName(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        codecName_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required string codecName = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearCodecName() {
        codecName_ = getDefaultInstance().getCodecName();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>required string codecName = 1;</code>
       * @param value The bytes for codecName to set.
       * @return This builder for chaining.
       */
      public Builder setCodecNameBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        codecName_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private int dataUnits_ ;
      /**
       * <code>required uint32 dataUnits = 2;</code>
       * @return Whether the dataUnits field is set.
       */
      @java.lang.Override
      public boolean hasDataUnits() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required uint32 dataUnits = 2;</code>
       * @return The dataUnits.
       */
      @java.lang.Override
      public int getDataUnits() {
        return dataUnits_;
      }
      /**
       * <code>required uint32 dataUnits = 2;</code>
       * @param value The dataUnits to set.
       * @return This builder for chaining.
       */
      public Builder setDataUnits(int value) {

        dataUnits_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 dataUnits = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearDataUnits() {
        bitField0_ = (bitField0_ & ~0x00000002);
        dataUnits_ = 0;
        onChanged();
        return this;
      }

      private int parityUnits_ ;
      /**
       * <code>required uint32 parityUnits = 3;</code>
       * @return Whether the parityUnits field is set.
       */
      @java.lang.Override
      public boolean hasParityUnits() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required uint32 parityUnits = 3;</code>
       * @return The parityUnits.
       */
      @java.lang.Override
      public int getParityUnits() {
        return parityUnits_;
      }
      /**
       * <code>required uint32 parityUnits = 3;</code>
       * @param value The parityUnits to set.
       * @return This builder for chaining.
       */
      public Builder setParityUnits(int value) {

        parityUnits_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 parityUnits = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearParityUnits() {
        bitField0_ = (bitField0_ & ~0x00000004);
        parityUnits_ = 0;
        onChanged();
        return this;
      }

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto> options_ =
        java.util.Collections.emptyList();
      private void ensureOptionsIsMutable() {
        if (!((bitField0_ & 0x00000008) != 0)) {
          options_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto>(options_);
          bitField0_ |= 0x00000008;
         }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder> optionsBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto> getOptionsList() {
        if (optionsBuilder_ == null) {
          return java.util.Collections.unmodifiableList(options_);
        } else {
          return optionsBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
       */
      public int getOptionsCount() {
        if (optionsBuilder_ == null) {
          return options_.size();
        } else {
          return optionsBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto getOptions(int index) {
        if (optionsBuilder_ == null) {
          return options_.get(index);
        } else {
          return optionsBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
       */
      public Builder setOptions(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto value) {
        if (optionsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureOptionsIsMutable();
          options_.set(index, value);
          onChanged();
        } else {
          optionsBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
       */
      public Builder setOptions(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder builderForValue) {
        if (optionsBuilder_ == null) {
          ensureOptionsIsMutable();
          options_.set(index, builderForValue.build());
          onChanged();
        } else {
          optionsBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
       */
      public Builder addOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto value) {
        if (optionsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureOptionsIsMutable();
          options_.add(value);
          onChanged();
        } else {
          optionsBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
       */
      public Builder addOptions(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto value) {
        if (optionsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureOptionsIsMutable();
          options_.add(index, value);
          onChanged();
        } else {
          optionsBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
       */
      public Builder addOptions(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder builderForValue) {
        if (optionsBuilder_ == null) {
          ensureOptionsIsMutable();
          options_.add(builderForValue.build());
          onChanged();
        } else {
          optionsBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
       */
      public Builder addOptions(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder builderForValue) {
        if (optionsBuilder_ == null) {
          ensureOptionsIsMutable();
          options_.add(index, builderForValue.build());
          onChanged();
        } else {
          optionsBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
       */
      public Builder addAllOptions(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto> values) {
        if (optionsBuilder_ == null) {
          ensureOptionsIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, options_);
          onChanged();
        } else {
          optionsBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
       */
      public Builder clearOptions() {
        if (optionsBuilder_ == null) {
          options_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000008);
          onChanged();
        } else {
          optionsBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
       */
      public Builder removeOptions(int index) {
        if (optionsBuilder_ == null) {
          ensureOptionsIsMutable();
          options_.remove(index);
          onChanged();
        } else {
          optionsBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder getOptionsBuilder(
          int index) {
        return getOptionsFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder getOptionsOrBuilder(
          int index) {
        if (optionsBuilder_ == null) {
          return options_.get(index);
        } else {
          return optionsBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder> 
           getOptionsOrBuilderList() {
        if (optionsBuilder_ != null) {
          return optionsBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(options_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder addOptionsBuilder() {
        return getOptionsFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder addOptionsBuilder(
          int index) {
        return getOptionsFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.ECSchemaOptionEntryProto options = 4;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder> 
           getOptionsBuilderList() {
        return getOptionsFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder> 
          getOptionsFieldBuilder() {
        if (optionsBuilder_ == null) {
          optionsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProtoOrBuilder>(
                  options_,
                  ((bitField0_ & 0x00000008) != 0),
                  getParentForChildren(),
                  isClean());
          options_ = null;
        }
        return optionsBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ECSchemaProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ECSchemaProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ECSchemaProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ECSchemaProto>() {
      @java.lang.Override
      public ECSchemaProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ECSchemaProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ECSchemaProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
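
  // Hand-written example (not part of the protoc output): a minimal sketch that assembles an
  // ECSchemaProto with the generated builder above. The codec name "rs" and the 6 data / 3
  // parity layout are illustrative values chosen for this example, not defaults from this file.
  static ECSchemaProto exampleSchema() {
    return ECSchemaProto.newBuilder()
        .setCodecName("rs")            // required string codecName = 1 (illustrative)
        .setDataUnits(6)               // required uint32 dataUnits = 2 (illustrative)
        .setParityUnits(3)             // required uint32 parityUnits = 3 (illustrative)
        .addOptions(ECSchemaOptionEntryProto.newBuilder()  // repeated options = 4
            .setKey("example-option")
            .setValue("example-value"))
        .build();                      // build() throws if any required field is missing
  }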

  public interface ErasureCodingPolicyProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ErasureCodingPolicyProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>optional string name = 1;</code>
     * @return Whether the name field is set.
     */
    boolean hasName();
    /**
     * <code>optional string name = 1;</code>
     * @return The name.
     */
    java.lang.String getName();
    /**
     * <code>optional string name = 1;</code>
     * @return The bytes for name.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getNameBytes();

    /**
     * <code>optional .hadoop.hdfs.ECSchemaProto schema = 2;</code>
     * @return Whether the schema field is set.
     */
    boolean hasSchema();
    /**
     * <code>optional .hadoop.hdfs.ECSchemaProto schema = 2;</code>
     * @return The schema.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getSchema();
    /**
     * <code>optional .hadoop.hdfs.ECSchemaProto schema = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder getSchemaOrBuilder();

    /**
     * <code>optional uint32 cellSize = 3;</code>
     * @return Whether the cellSize field is set.
     */
    boolean hasCellSize();
    /**
     * <code>optional uint32 cellSize = 3;</code>
     * @return The cellSize.
     */
    int getCellSize();

    /**
     * <pre>
     * Actually a byte - only 8 bits used
     * </pre>
     *
     * <code>required uint32 id = 4;</code>
     * @return Whether the id field is set.
     */
    boolean hasId();
    /**
     * <pre>
     * Actually a byte - only 8 bits used
     * </pre>
     *
     * <code>required uint32 id = 4;</code>
     * @return The id.
     */
    int getId();

    /**
     * <code>optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED];</code>
     * @return Whether the state field is set.
     */
    boolean hasState();
    /**
     * <code>optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED];</code>
     * @return The state.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState getState();
  }
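  /*
   * Illustrative note (hand-written): for these proto2-style optional fields the hasX()
   * accessors report presence, while getX() falls back to the declared default when the
   * field was never set. For example, state carries [default = ENABLED]:
   *
   *   HdfsProtos.ErasureCodingPolicyProto p =
   *       HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
   *   boolean present = p.hasState();                         // false - never written
   *   HdfsProtos.ErasureCodingPolicyState s = p.getState();   // ENABLED, the default
   */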
  /**
   * Protobuf type {@code hadoop.hdfs.ErasureCodingPolicyProto}
   */
  public static final class ErasureCodingPolicyProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.ErasureCodingPolicyProto)
      ErasureCodingPolicyProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use ErasureCodingPolicyProto.newBuilder() to construct.
    private ErasureCodingPolicyProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ErasureCodingPolicyProto() {
      name_ = "";
      state_ = 2;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ErasureCodingPolicyProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ErasureCodingPolicyProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ErasureCodingPolicyProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder.class);
    }

    private int bitField0_;
    public static final int NAME_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object name_ = "";
    /**
     * <code>optional string name = 1;</code>
     * @return Whether the name field is set.
     */
    @java.lang.Override
    public boolean hasName() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional string name = 1;</code>
     * @return The name.
     */
    @java.lang.Override
    public java.lang.String getName() {
      java.lang.Object ref = name_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          name_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string name = 1;</code>
     * @return The bytes for name.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getNameBytes() {
      java.lang.Object ref = name_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        name_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int SCHEMA_FIELD_NUMBER = 2;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto schema_;
    /**
     * <code>optional .hadoop.hdfs.ECSchemaProto schema = 2;</code>
     * @return Whether the schema field is set.
     */
    @java.lang.Override
    public boolean hasSchema() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.ECSchemaProto schema = 2;</code>
     * @return The schema.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getSchema() {
      return schema_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance() : schema_;
    }
    /**
     * <code>optional .hadoop.hdfs.ECSchemaProto schema = 2;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder getSchemaOrBuilder() {
      return schema_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance() : schema_;
    }

    public static final int CELLSIZE_FIELD_NUMBER = 3;
    private int cellSize_ = 0;
    /**
     * <code>optional uint32 cellSize = 3;</code>
     * @return Whether the cellSize field is set.
     */
    @java.lang.Override
    public boolean hasCellSize() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional uint32 cellSize = 3;</code>
     * @return The cellSize.
     */
    @java.lang.Override
    public int getCellSize() {
      return cellSize_;
    }

    public static final int ID_FIELD_NUMBER = 4;
    private int id_ = 0;
    /**
     * <pre>
     * Actually a byte - only 8 bits used
     * </pre>
     *
     * <code>required uint32 id = 4;</code>
     * @return Whether the id field is set.
     */
    @java.lang.Override
    public boolean hasId() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <pre>
     * Actually a byte - only 8 bits used
     * </pre>
     *
     * <code>required uint32 id = 4;</code>
     * @return The id.
     */
    @java.lang.Override
    public int getId() {
      return id_;
    }

    public static final int STATE_FIELD_NUMBER = 5;
    private int state_ = 2;
    /**
     * <code>optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED];</code>
     * @return Whether the state field is set.
     */
    @java.lang.Override public boolean hasState() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED];</code>
     * @return The state.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState getState() {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState.forNumber(state_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState.ENABLED : result;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (hasSchema()) {
        if (!getSchema().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, name_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeMessage(2, getSchema());
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeUInt32(3, cellSize_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeUInt32(4, id_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeEnum(5, state_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, name_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(2, getSchema());
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(3, cellSize_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(4, id_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(5, state_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto) obj;

      if (hasName() != other.hasName()) return false;
      if (hasName()) {
        if (!getName()
            .equals(other.getName())) return false;
      }
      if (hasSchema() != other.hasSchema()) return false;
      if (hasSchema()) {
        if (!getSchema()
            .equals(other.getSchema())) return false;
      }
      if (hasCellSize() != other.hasCellSize()) return false;
      if (hasCellSize()) {
        if (getCellSize()
            != other.getCellSize()) return false;
      }
      if (hasId() != other.hasId()) return false;
      if (hasId()) {
        if (getId()
            != other.getId()) return false;
      }
      if (hasState() != other.hasState()) return false;
      if (hasState()) {
        if (state_ != other.state_) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasName()) {
        hash = (37 * hash) + NAME_FIELD_NUMBER;
        hash = (53 * hash) + getName().hashCode();
      }
      if (hasSchema()) {
        hash = (37 * hash) + SCHEMA_FIELD_NUMBER;
        hash = (53 * hash) + getSchema().hashCode();
      }
      if (hasCellSize()) {
        hash = (37 * hash) + CELLSIZE_FIELD_NUMBER;
        hash = (53 * hash) + getCellSize();
      }
      if (hasId()) {
        hash = (37 * hash) + ID_FIELD_NUMBER;
        hash = (53 * hash) + getId();
      }
      if (hasState()) {
        hash = (37 * hash) + STATE_FIELD_NUMBER;
        hash = (53 * hash) + state_;
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.ErasureCodingPolicyProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ErasureCodingPolicyProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ErasureCodingPolicyProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ErasureCodingPolicyProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getSchemaFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        name_ = "";
        schema_ = null;
        if (schemaBuilder_ != null) {
          schemaBuilder_.dispose();
          schemaBuilder_ = null;
        }
        cellSize_ = 0;
        id_ = 0;
        state_ = 2;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ErasureCodingPolicyProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.name_ = name_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.schema_ = schemaBuilder_ == null
              ? schema_
              : schemaBuilder_.build();
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.cellSize_ = cellSize_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.id_ = id_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.state_ = state_;
          to_bitField0_ |= 0x00000010;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()) return this;
        if (other.hasName()) {
          name_ = other.name_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasSchema()) {
          mergeSchema(other.getSchema());
        }
        if (other.hasCellSize()) {
          setCellSize(other.getCellSize());
        }
        if (other.hasId()) {
          setId(other.getId());
        }
        if (other.hasState()) {
          setState(other.getState());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasId()) {
          return false;
        }
        if (hasSchema()) {
          if (!getSchema().isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                name_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                input.readMessage(
                    getSchemaFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 24: {
                cellSize_ = input.readUInt32();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 32: {
                id_ = input.readUInt32();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
              case 40: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(5, tmpRaw);
                } else {
                  state_ = tmpRaw;
                  bitField0_ |= 0x00000010;
                }
                break;
              } // case 40
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object name_ = "";
      /**
       * <code>optional string name = 1;</code>
       * @return Whether the name field is set.
       */
      public boolean hasName() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional string name = 1;</code>
       * @return The name.
       */
      public java.lang.String getName() {
        java.lang.Object ref = name_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            name_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string name = 1;</code>
       * @return The bytes for name.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getNameBytes() {
        java.lang.Object ref = name_;
        if (ref instanceof java.lang.String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          name_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string name = 1;</code>
       * @param value The name to set.
       * @return This builder for chaining.
       */
      public Builder setName(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        name_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>optional string name = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearName() {
        name_ = getDefaultInstance().getName();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>optional string name = 1;</code>
       * @param value The bytes for name to set.
       * @return This builder for chaining.
       */
      public Builder setNameBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        name_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto schema_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder> schemaBuilder_;
      /**
       * <code>optional .hadoop.hdfs.ECSchemaProto schema = 2;</code>
       * @return Whether the schema field is set.
       */
      public boolean hasSchema() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.ECSchemaProto schema = 2;</code>
       * @return The schema.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto getSchema() {
        if (schemaBuilder_ == null) {
          return schema_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance() : schema_;
        } else {
          return schemaBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.ECSchemaProto schema = 2;</code>
       */
      public Builder setSchema(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto value) {
        if (schemaBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          schema_ = value;
        } else {
          schemaBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.ECSchemaProto schema = 2;</code>
       */
      public Builder setSchema(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder builderForValue) {
        if (schemaBuilder_ == null) {
          schema_ = builderForValue.build();
        } else {
          schemaBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.ECSchemaProto schema = 2;</code>
       */
      public Builder mergeSchema(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto value) {
        if (schemaBuilder_ == null) {
          if (((bitField0_ & 0x00000002) != 0) &&
            schema_ != null &&
            schema_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance()) {
            getSchemaBuilder().mergeFrom(value);
          } else {
            schema_ = value;
          }
        } else {
          schemaBuilder_.mergeFrom(value);
        }
        if (schema_ != null) {
          bitField0_ |= 0x00000002;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.ECSchemaProto schema = 2;</code>
       */
      public Builder clearSchema() {
        bitField0_ = (bitField0_ & ~0x00000002);
        schema_ = null;
        if (schemaBuilder_ != null) {
          schemaBuilder_.dispose();
          schemaBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.ECSchemaProto schema = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder getSchemaBuilder() {
        bitField0_ |= 0x00000002;
        onChanged();
        return getSchemaFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.ECSchemaProto schema = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder getSchemaOrBuilder() {
        if (schemaBuilder_ != null) {
          return schemaBuilder_.getMessageOrBuilder();
        } else {
          return schema_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.getDefaultInstance() : schema_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.ECSchemaProto schema = 2;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder> 
          getSchemaFieldBuilder() {
        if (schemaBuilder_ == null) {
          schemaBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProtoOrBuilder>(
                  getSchema(),
                  getParentForChildren(),
                  isClean());
          schema_ = null;
        }
        return schemaBuilder_;
      }

      private int cellSize_ ;
      /**
       * <code>optional uint32 cellSize = 3;</code>
       * @return Whether the cellSize field is set.
       */
      @java.lang.Override
      public boolean hasCellSize() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional uint32 cellSize = 3;</code>
       * @return The cellSize.
       */
      @java.lang.Override
      public int getCellSize() {
        return cellSize_;
      }
      /**
       * <code>optional uint32 cellSize = 3;</code>
       * @param value The cellSize to set.
       * @return This builder for chaining.
       */
      public Builder setCellSize(int value) {
        cellSize_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint32 cellSize = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearCellSize() {
        bitField0_ = (bitField0_ & ~0x00000004);
        cellSize_ = 0;
        onChanged();
        return this;
      }

      private int id_ ;
      /**
       * <pre>
       * Actually a byte - only 8 bits used
       * </pre>
       *
       * <code>required uint32 id = 4;</code>
       * @return Whether the id field is set.
       */
      @java.lang.Override
      public boolean hasId() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <pre>
       * Actually a byte - only 8 bits used
       * </pre>
       *
       * <code>required uint32 id = 4;</code>
       * @return The id.
       */
      @java.lang.Override
      public int getId() {
        return id_;
      }
      /**
       * <pre>
       * Actually a byte - only 8 bits used
       * </pre>
       *
       * <code>required uint32 id = 4;</code>
       * @param value The id to set.
       * @return This builder for chaining.
       */
      public Builder setId(int value) {
        id_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Actually a byte - only 8 bits used
       * </pre>
       *
       * <code>required uint32 id = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearId() {
        bitField0_ = (bitField0_ & ~0x00000008);
        id_ = 0;
        onChanged();
        return this;
      }

      private int state_ = 2;
      /**
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED];</code>
       * @return Whether the state field is set.
       */
      @java.lang.Override public boolean hasState() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED];</code>
       * @return The state.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState getState() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState.forNumber(state_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState.ENABLED : result;
      }
      /**
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED];</code>
       * @param value The state to set.
       * @return This builder for chaining.
       */
      public Builder setState(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyState value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000010;
        state_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyState state = 5 [default = ENABLED];</code>
       * @return This builder for chaining.
       */
      public Builder clearState() {
        bitField0_ = (bitField0_ & ~0x00000010);
        state_ = 2;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ErasureCodingPolicyProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ErasureCodingPolicyProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ErasureCodingPolicyProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ErasureCodingPolicyProto>() {
      @java.lang.Override
      public ErasureCodingPolicyProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ErasureCodingPolicyProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ErasureCodingPolicyProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
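  /*
   * Illustrative usage (hand-written, not generated): building and round-tripping an
   * ErasureCodingPolicyProto with the Builder defined above. The field values are
   * placeholders; only id is required, so build() throws UninitializedMessageException
   * if it was never set.
   *
   *   HdfsProtos.ErasureCodingPolicyProto policy =
   *       HdfsProtos.ErasureCodingPolicyProto.newBuilder()
   *           .setName("example-policy")
   *           .setCellSize(1 << 20)
   *           .setId(1)
   *           .setState(HdfsProtos.ErasureCodingPolicyState.ENABLED)
   *           .build();
   *   byte[] wire = policy.toByteArray();
   *   HdfsProtos.ErasureCodingPolicyProto parsed =
   *       HdfsProtos.ErasureCodingPolicyProto.parseFrom(wire);
   */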

  public interface AddErasureCodingPolicyResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.AddErasureCodingPolicyResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;</code>
     * @return Whether the policy field is set.
     */
    boolean hasPolicy();
    /**
     * <code>required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;</code>
     * @return The policy.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getPolicy();
    /**
     * <code>required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getPolicyOrBuilder();

    /**
     * <code>required bool succeed = 2;</code>
     * @return Whether the succeed field is set.
     */
    boolean hasSucceed();
    /**
     * <code>required bool succeed = 2;</code>
     * @return The succeed.
     */
    boolean getSucceed();

    /**
     * <code>optional string errorMsg = 3;</code>
     * @return Whether the errorMsg field is set.
     */
    boolean hasErrorMsg();
    /**
     * <code>optional string errorMsg = 3;</code>
     * @return The errorMsg.
     */
    java.lang.String getErrorMsg();
    /**
     * <code>optional string errorMsg = 3;</code>
     * @return The bytes for errorMsg.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getErrorMsgBytes();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.AddErasureCodingPolicyResponseProto}
   */
  public static final class AddErasureCodingPolicyResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.AddErasureCodingPolicyResponseProto)
      AddErasureCodingPolicyResponseProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use AddErasureCodingPolicyResponseProto.newBuilder() to construct.
    private AddErasureCodingPolicyResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private AddErasureCodingPolicyResponseProto() {
      errorMsg_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new AddErasureCodingPolicyResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.Builder.class);
    }

    private int bitField0_;
    public static final int POLICY_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto policy_;
    /**
     * <code>required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;</code>
     * @return Whether the policy field is set.
     */
    @java.lang.Override
    public boolean hasPolicy() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;</code>
     * @return The policy.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getPolicy() {
      return policy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : policy_;
    }
    /**
     * <code>required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getPolicyOrBuilder() {
      return policy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : policy_;
    }

    public static final int SUCCEED_FIELD_NUMBER = 2;
    private boolean succeed_ = false;
    /**
     * <code>required bool succeed = 2;</code>
     * @return Whether the succeed field is set.
     */
    @java.lang.Override
    public boolean hasSucceed() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required bool succeed = 2;</code>
     * @return The succeed.
     */
    @java.lang.Override
    public boolean getSucceed() {
      return succeed_;
    }

    public static final int ERRORMSG_FIELD_NUMBER = 3;
    @SuppressWarnings("serial")
    private volatile java.lang.Object errorMsg_ = "";
    /**
     * <code>optional string errorMsg = 3;</code>
     * @return Whether the errorMsg field is set.
     */
    @java.lang.Override
    public boolean hasErrorMsg() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional string errorMsg = 3;</code>
     * @return The errorMsg.
     */
    @java.lang.Override
    public java.lang.String getErrorMsg() {
      java.lang.Object ref = errorMsg_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          errorMsg_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string errorMsg = 3;</code>
     * @return The bytes for errorMsg.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getErrorMsgBytes() {
      java.lang.Object ref = errorMsg_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        errorMsg_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasPolicy()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSucceed()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getPolicy().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
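    /*
     * Illustrative note (hand-written): policy and succeed are both required, and the
     * nested policy must itself be initialized, so isInitialized() only reports true once
     * all three checks above pass. A sketch using the standard generated builder setters
     * (assumed; they appear later in this class, outside this excerpt):
     *
     *   HdfsProtos.AddErasureCodingPolicyResponseProto.Builder b =
     *       HdfsProtos.AddErasureCodingPolicyResponseProto.newBuilder()
     *           .setPolicy(somePolicy)      // an initialized ErasureCodingPolicyProto
     *           .setSucceed(true);
     *   boolean ready = b.isInitialized(); // true only if somePolicy is itself initialized
     */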

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getPolicy());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeBool(2, succeed_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, errorMsg_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getPolicy());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(2, succeed_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, errorMsg_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto) obj;

      if (hasPolicy() != other.hasPolicy()) return false;
      if (hasPolicy()) {
        if (!getPolicy()
            .equals(other.getPolicy())) return false;
      }
      if (hasSucceed() != other.hasSucceed()) return false;
      if (hasSucceed()) {
        if (getSucceed()
            != other.getSucceed()) return false;
      }
      if (hasErrorMsg() != other.hasErrorMsg()) return false;
      if (hasErrorMsg()) {
        if (!getErrorMsg()
            .equals(other.getErrorMsg())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasPolicy()) {
        hash = (37 * hash) + POLICY_FIELD_NUMBER;
        hash = (53 * hash) + getPolicy().hashCode();
      }
      if (hasSucceed()) {
        hash = (37 * hash) + SUCCEED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getSucceed());
      }
      if (hasErrorMsg()) {
        hash = (37 * hash) + ERRORMSG_FIELD_NUMBER;
        hash = (53 * hash) + getErrorMsg().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.AddErasureCodingPolicyResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.AddErasureCodingPolicyResponseProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getPolicyFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        policy_ = null;
        if (policyBuilder_ != null) {
          policyBuilder_.dispose();
          policyBuilder_ = null;
        }
        succeed_ = false;
        errorMsg_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.policy_ = policyBuilder_ == null
              ? policy_
              : policyBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.succeed_ = succeed_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.errorMsg_ = errorMsg_;
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto.getDefaultInstance()) return this;
        if (other.hasPolicy()) {
          mergePolicy(other.getPolicy());
        }
        if (other.hasSucceed()) {
          setSucceed(other.getSucceed());
        }
        if (other.hasErrorMsg()) {
          errorMsg_ = other.errorMsg_;
          bitField0_ |= 0x00000004;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasPolicy()) {
          return false;
        }
        if (!hasSucceed()) {
          return false;
        }
        if (!getPolicy().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getPolicyFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                succeed_ = input.readBool();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 26: {
                errorMsg_ = input.readBytes();
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto policy_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> policyBuilder_;
      /**
       * <code>required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;</code>
       * @return Whether the policy field is set.
       */
      public boolean hasPolicy() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;</code>
       * @return The policy.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getPolicy() {
        if (policyBuilder_ == null) {
          return policy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : policy_;
        } else {
          return policyBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;</code>
       */
      public Builder setPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
        if (policyBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          policy_ = value;
        } else {
          policyBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;</code>
       */
      public Builder setPolicy(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) {
        if (policyBuilder_ == null) {
          policy_ = builderForValue.build();
        } else {
          policyBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;</code>
       */
      public Builder mergePolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
        if (policyBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            policy_ != null &&
            policy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()) {
            getPolicyBuilder().mergeFrom(value);
          } else {
            policy_ = value;
          }
        } else {
          policyBuilder_.mergeFrom(value);
        }
        if (policy_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;</code>
       */
      public Builder clearPolicy() {
        bitField0_ = (bitField0_ & ~0x00000001);
        policy_ = null;
        if (policyBuilder_ != null) {
          policyBuilder_.dispose();
          policyBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder getPolicyBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getPolicyFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getPolicyOrBuilder() {
        if (policyBuilder_ != null) {
          return policyBuilder_.getMessageOrBuilder();
        } else {
          return policy_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : policy_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.ErasureCodingPolicyProto policy = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> 
          getPolicyFieldBuilder() {
        if (policyBuilder_ == null) {
          policyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>(
                  getPolicy(),
                  getParentForChildren(),
                  isClean());
          policy_ = null;
        }
        return policyBuilder_;
      }
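      // Note (illustrative, not emitted by protoc): getPolicyFieldBuilder() creates the
      // SingleFieldBuilderV3 lazily, seeding it with the current policy_ value and then
      // nulling policy_ so the field builder becomes the single source of truth for the
      // nested message. A caller that wants to edit the nested policy in place can do,
      // for example (assuming "existingPolicy" is an ErasureCodingPolicyProto supplied
      // by the caller):
      //
      //   AddErasureCodingPolicyResponseProto.Builder b =
      //       AddErasureCodingPolicyResponseProto.newBuilder();
      //   b.getPolicyBuilder().mergeFrom(existingPolicy);
      //   // getPolicyBuilder() also sets bit 0x00000001, so hasPolicy() is now true.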

      private boolean succeed_ ;
      /**
       * <code>required bool succeed = 2;</code>
       * @return Whether the succeed field is set.
       */
      @java.lang.Override
      public boolean hasSucceed() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required bool succeed = 2;</code>
       * @return The succeed.
       */
      @java.lang.Override
      public boolean getSucceed() {
        return succeed_;
      }
      /**
       * <code>required bool succeed = 2;</code>
       * @param value The succeed to set.
       * @return This builder for chaining.
       */
      public Builder setSucceed(boolean value) {

        succeed_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required bool succeed = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearSucceed() {
        bitField0_ = (bitField0_ & ~0x00000002);
        succeed_ = false;
        onChanged();
        return this;
      }

      private java.lang.Object errorMsg_ = "";
      /**
       * <code>optional string errorMsg = 3;</code>
       * @return Whether the errorMsg field is set.
       */
      public boolean hasErrorMsg() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional string errorMsg = 3;</code>
       * @return The errorMsg.
       */
      public java.lang.String getErrorMsg() {
        java.lang.Object ref = errorMsg_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            errorMsg_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string errorMsg = 3;</code>
       * @return The bytes for errorMsg.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getErrorMsgBytes() {
        java.lang.Object ref = errorMsg_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          errorMsg_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string errorMsg = 3;</code>
       * @param value The errorMsg to set.
       * @return This builder for chaining.
       */
      public Builder setErrorMsg(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        errorMsg_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional string errorMsg = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearErrorMsg() {
        errorMsg_ = getDefaultInstance().getErrorMsg();
        bitField0_ = (bitField0_ & ~0x00000004);
        onChanged();
        return this;
      }
      /**
       * <code>optional string errorMsg = 3;</code>
       * @param value The bytes for errorMsg to set.
       * @return This builder for chaining.
       */
      public Builder setErrorMsgBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        errorMsg_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AddErasureCodingPolicyResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.AddErasureCodingPolicyResponseProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<AddErasureCodingPolicyResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<AddErasureCodingPolicyResponseProto>() {
      @java.lang.Override
      public AddErasureCodingPolicyResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<AddErasureCodingPolicyResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<AddErasureCodingPolicyResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddErasureCodingPolicyResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
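  // Illustrative usage sketch (not part of the generated code): building and re-parsing
  // an AddErasureCodingPolicyResponseProto. "ecPolicy" is an assumed, caller-supplied
  // ErasureCodingPolicyProto; the builder methods are generated above, and
  // toByteArray()/parseFrom(byte[]) are the standard overloads generated or inherited
  // for every message in this file.
  //
  //   AddErasureCodingPolicyResponseProto resp =
  //       AddErasureCodingPolicyResponseProto.newBuilder()
  //           .setPolicy(ecPolicy)                    // required field 1
  //           .setSucceed(false)                      // required field 2
  //           .setErrorMsg("policy already exists")   // optional field 3
  //           .build();                               // throws if a required field is unset
  //   byte[] wire = resp.toByteArray();
  //   AddErasureCodingPolicyResponseProto parsed =
  //       AddErasureCodingPolicyResponseProto.parseFrom(wire);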

  public interface ECTopologyVerifierResultProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ECTopologyVerifierResultProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required string resultMessage = 1;</code>
     * @return Whether the resultMessage field is set.
     */
    boolean hasResultMessage();
    /**
     * <code>required string resultMessage = 1;</code>
     * @return The resultMessage.
     */
    java.lang.String getResultMessage();
    /**
     * <code>required string resultMessage = 1;</code>
     * @return The bytes for resultMessage.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getResultMessageBytes();

    /**
     * <code>required bool isSupported = 2;</code>
     * @return Whether the isSupported field is set.
     */
    boolean hasIsSupported();
    /**
     * <code>required bool isSupported = 2;</code>
     * @return The isSupported.
     */
    boolean getIsSupported();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.ECTopologyVerifierResultProto}
   */
  public static final class ECTopologyVerifierResultProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.ECTopologyVerifierResultProto)
      ECTopologyVerifierResultProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use ECTopologyVerifierResultProto.newBuilder() to construct.
    private ECTopologyVerifierResultProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ECTopologyVerifierResultProto() {
      resultMessage_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ECTopologyVerifierResultProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto.Builder.class);
    }

    private int bitField0_;
    public static final int RESULTMESSAGE_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object resultMessage_ = "";
    /**
     * <code>required string resultMessage = 1;</code>
     * @return Whether the resultMessage field is set.
     */
    @java.lang.Override
    public boolean hasResultMessage() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required string resultMessage = 1;</code>
     * @return The resultMessage.
     */
    @java.lang.Override
    public java.lang.String getResultMessage() {
      java.lang.Object ref = resultMessage_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          resultMessage_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string resultMessage = 1;</code>
     * @return The bytes for resultMessage.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getResultMessageBytes() {
      java.lang.Object ref = resultMessage_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        resultMessage_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int ISSUPPORTED_FIELD_NUMBER = 2;
    private boolean isSupported_ = false;
    /**
     * <code>required bool isSupported = 2;</code>
     * @return Whether the isSupported field is set.
     */
    @java.lang.Override
    public boolean hasIsSupported() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required bool isSupported = 2;</code>
     * @return The isSupported.
     */
    @java.lang.Override
    public boolean getIsSupported() {
      return isSupported_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasResultMessage()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasIsSupported()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, resultMessage_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeBool(2, isSupported_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, resultMessage_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(2, isSupported_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto) obj;

      if (hasResultMessage() != other.hasResultMessage()) return false;
      if (hasResultMessage()) {
        if (!getResultMessage()
            .equals(other.getResultMessage())) return false;
      }
      if (hasIsSupported() != other.hasIsSupported()) return false;
      if (hasIsSupported()) {
        if (getIsSupported()
            != other.getIsSupported()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasResultMessage()) {
        hash = (37 * hash) + RESULTMESSAGE_FIELD_NUMBER;
        hash = (53 * hash) + getResultMessage().hashCode();
      }
      if (hasIsSupported()) {
        hash = (37 * hash) + ISSUPPORTED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getIsSupported());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.ECTopologyVerifierResultProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ECTopologyVerifierResultProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        resultMessage_ = "";
        isSupported_ = false;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.resultMessage_ = resultMessage_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.isSupported_ = isSupported_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto.getDefaultInstance()) return this;
        if (other.hasResultMessage()) {
          resultMessage_ = other.resultMessage_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasIsSupported()) {
          setIsSupported(other.getIsSupported());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasResultMessage()) {
          return false;
        }
        if (!hasIsSupported()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                resultMessage_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                isSupported_ = input.readBool();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object resultMessage_ = "";
      /**
       * <code>required string resultMessage = 1;</code>
       * @return Whether the resultMessage field is set.
       */
      public boolean hasResultMessage() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required string resultMessage = 1;</code>
       * @return The resultMessage.
       */
      public java.lang.String getResultMessage() {
        java.lang.Object ref = resultMessage_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            resultMessage_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string resultMessage = 1;</code>
       * @return The bytes for resultMessage.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getResultMessageBytes() {
        java.lang.Object ref = resultMessage_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          resultMessage_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string resultMessage = 1;</code>
       * @param value The resultMessage to set.
       * @return This builder for chaining.
       */
      public Builder setResultMessage(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        resultMessage_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required string resultMessage = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearResultMessage() {
        resultMessage_ = getDefaultInstance().getResultMessage();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>required string resultMessage = 1;</code>
       * @param value The bytes for resultMessage to set.
       * @return This builder for chaining.
       */
      public Builder setResultMessageBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        resultMessage_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private boolean isSupported_ ;
      /**
       * <code>required bool isSupported = 2;</code>
       * @return Whether the isSupported field is set.
       */
      @java.lang.Override
      public boolean hasIsSupported() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required bool isSupported = 2;</code>
       * @return The isSupported.
       */
      @java.lang.Override
      public boolean getIsSupported() {
        return isSupported_;
      }
      /**
       * <code>required bool isSupported = 2;</code>
       * @param value The isSupported to set.
       * @return This builder for chaining.
       */
      public Builder setIsSupported(boolean value) {

        isSupported_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required bool isSupported = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearIsSupported() {
        bitField0_ = (bitField0_ & ~0x00000002);
        isSupported_ = false;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ECTopologyVerifierResultProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ECTopologyVerifierResultProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ECTopologyVerifierResultProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ECTopologyVerifierResultProto>() {
      @java.lang.Override
      public ECTopologyVerifierResultProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ECTopologyVerifierResultProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ECTopologyVerifierResultProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECTopologyVerifierResultProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
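  // Illustrative usage sketch (not generated): both fields of ECTopologyVerifierResultProto
  // are required, so build() throws unless resultMessage and isSupported are both set,
  // while buildPartial() skips that check. The message text below is an example value only.
  //
  //   ECTopologyVerifierResultProto result =
  //       ECTopologyVerifierResultProto.newBuilder()
  //           .setResultMessage("insufficient racks for the requested EC policy")
  //           .setIsSupported(false)
  //           .build();
  //   boolean ok = result.getIsSupported();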

  public interface HdfsPathHandleProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.HdfsPathHandleProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>optional uint64 inodeId = 1;</code>
     * @return Whether the inodeId field is set.
     */
    boolean hasInodeId();
    /**
     * <code>optional uint64 inodeId = 1;</code>
     * @return The inodeId.
     */
    long getInodeId();

    /**
     * <code>optional uint64 mtime = 2;</code>
     * @return Whether the mtime field is set.
     */
    boolean hasMtime();
    /**
     * <code>optional uint64 mtime = 2;</code>
     * @return The mtime.
     */
    long getMtime();

    /**
     * <code>optional string path = 3;</code>
     * @return Whether the path field is set.
     */
    boolean hasPath();
    /**
     * <code>optional string path = 3;</code>
     * @return The path.
     */
    java.lang.String getPath();
    /**
     * <code>optional string path = 3;</code>
     * @return The bytes for path.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getPathBytes();
  }
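  // Illustrative note (not generated): HdfsPathHandleProto, defined below, carries the
  // three optional fields declared in the interface above (inodeId, mtime, path). A
  // minimal read-side sketch, assuming "handle" is an HdfsPathHandleProto already
  // obtained from a response:
  //
  //   if (handle.hasInodeId() && handle.hasMtime()) {
  //     long inode = handle.getInodeId();
  //     long mtime = handle.getMtime();
  //     // compare against the current inode id / mtime before trusting the cached path
  //   }
  //   String path = handle.hasPath() ? handle.getPath() : "";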
  /**
   * <pre>
   **
   * Placeholder type for consistent HDFS operations.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.HdfsPathHandleProto}
   */
  public static final class HdfsPathHandleProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.HdfsPathHandleProto)
      HdfsPathHandleProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use HdfsPathHandleProto.newBuilder() to construct.
    private HdfsPathHandleProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private HdfsPathHandleProto() {
      path_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new HdfsPathHandleProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsPathHandleProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsPathHandleProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.Builder.class);
    }

    private int bitField0_;
    public static final int INODEID_FIELD_NUMBER = 1;
    private long inodeId_ = 0L;
    /**
     * <code>optional uint64 inodeId = 1;</code>
     * @return Whether the inodeId field is set.
     */
    @java.lang.Override
    public boolean hasInodeId() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional uint64 inodeId = 1;</code>
     * @return The inodeId.
     */
    @java.lang.Override
    public long getInodeId() {
      return inodeId_;
    }

    public static final int MTIME_FIELD_NUMBER = 2;
    private long mtime_ = 0L;
    /**
     * <code>optional uint64 mtime = 2;</code>
     * @return Whether the mtime field is set.
     */
    @java.lang.Override
    public boolean hasMtime() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional uint64 mtime = 2;</code>
     * @return The mtime.
     */
    @java.lang.Override
    public long getMtime() {
      return mtime_;
    }

    public static final int PATH_FIELD_NUMBER = 3;
    @SuppressWarnings("serial")
    private volatile java.lang.Object path_ = "";
    /**
     * <code>optional string path = 3;</code>
     * @return Whether the path field is set.
     */
    @java.lang.Override
    public boolean hasPath() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional string path = 3;</code>
     * @return The path.
     */
    @java.lang.Override
    public java.lang.String getPath() {
      java.lang.Object ref = path_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          path_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string path = 3;</code>
     * @return The bytes for path.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getPathBytes() {
      java.lang.Object ref = path_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        path_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt64(1, inodeId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(2, mtime_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, path_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(1, inodeId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(2, mtime_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, path_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto) obj;

      if (hasInodeId() != other.hasInodeId()) return false;
      if (hasInodeId()) {
        if (getInodeId()
            != other.getInodeId()) return false;
      }
      if (hasMtime() != other.hasMtime()) return false;
      if (hasMtime()) {
        if (getMtime()
            != other.getMtime()) return false;
      }
      if (hasPath() != other.hasPath()) return false;
      if (hasPath()) {
        if (!getPath()
            .equals(other.getPath())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasInodeId()) {
        hash = (37 * hash) + INODEID_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getInodeId());
      }
      if (hasMtime()) {
        hash = (37 * hash) + MTIME_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getMtime());
      }
      if (hasPath()) {
        hash = (37 * hash) + PATH_FIELD_NUMBER;
        hash = (53 * hash) + getPath().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }
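    // Illustrative builder usage (the values below are hypothetical, not taken
    // from HDFS itself):
    //   HdfsPathHandleProto handle = HdfsPathHandleProto.newBuilder()
    //       .setInodeId(16385L)
    //       .setMtime(System.currentTimeMillis())
    //       .setPath("/user/example")
    //       .build();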

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Placeholder type for consistent HDFS operations.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.HdfsPathHandleProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.HdfsPathHandleProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsPathHandleProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsPathHandleProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        inodeId_ = 0L;
        mtime_ = 0L;
        path_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsPathHandleProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.inodeId_ = inodeId_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.mtime_ = mtime_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.path_ = path_;
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }
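      // buildPartial0() copies only the fields whose bits are set in the
      // builder's bitField0_, then ORs those same bits into the built message's
      // bitField0_ so hasInodeId()/hasMtime()/hasPath() report presence there.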

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto.getDefaultInstance()) return this;
        if (other.hasInodeId()) {
          setInodeId(other.getInodeId());
        }
        if (other.hasMtime()) {
          setMtime(other.getMtime());
        }
        if (other.hasPath()) {
          path_ = other.path_;
          bitField0_ |= 0x00000004;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
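            // Each tag is (field_number << 3) | wire_type: 8 -> inodeId (field 1,
            // varint), 16 -> mtime (field 2, varint), 26 -> path (field 3,
            // length-delimited); tag 0 marks end of input.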
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                inodeId_ = input.readUInt64();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 16: {
                mtime_ = input.readUInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 26: {
                path_ = input.readBytes();
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private long inodeId_ ;
      /**
       * <code>optional uint64 inodeId = 1;</code>
       * @return Whether the inodeId field is set.
       */
      @java.lang.Override
      public boolean hasInodeId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional uint64 inodeId = 1;</code>
       * @return The inodeId.
       */
      @java.lang.Override
      public long getInodeId() {
        return inodeId_;
      }
      /**
       * <code>optional uint64 inodeId = 1;</code>
       * @param value The inodeId to set.
       * @return This builder for chaining.
       */
      public Builder setInodeId(long value) {

        inodeId_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 inodeId = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearInodeId() {
        bitField0_ = (bitField0_ & ~0x00000001);
        inodeId_ = 0L;
        onChanged();
        return this;
      }

      private long mtime_ ;
      /**
       * <code>optional uint64 mtime = 2;</code>
       * @return Whether the mtime field is set.
       */
      @java.lang.Override
      public boolean hasMtime() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional uint64 mtime = 2;</code>
       * @return The mtime.
       */
      @java.lang.Override
      public long getMtime() {
        return mtime_;
      }
      /**
       * <code>optional uint64 mtime = 2;</code>
       * @param value The mtime to set.
       * @return This builder for chaining.
       */
      public Builder setMtime(long value) {

        mtime_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 mtime = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearMtime() {
        bitField0_ = (bitField0_ & ~0x00000002);
        mtime_ = 0L;
        onChanged();
        return this;
      }
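      // path_ holds either a String or a ByteString. getPath() decodes lazily and
      // caches the String when the bytes are valid UTF-8; getPathBytes() encodes
      // lazily and caches the ByteString.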

      private java.lang.Object path_ = "";
      /**
       * <code>optional string path = 3;</code>
       * @return Whether the path field is set.
       */
      public boolean hasPath() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional string path = 3;</code>
       * @return The path.
       */
      public java.lang.String getPath() {
        java.lang.Object ref = path_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            path_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string path = 3;</code>
       * @return The bytes for path.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getPathBytes() {
        java.lang.Object ref = path_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          path_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string path = 3;</code>
       * @param value The path to set.
       * @return This builder for chaining.
       */
      public Builder setPath(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        path_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional string path = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearPath() {
        path_ = getDefaultInstance().getPath();
        bitField0_ = (bitField0_ & ~0x00000004);
        onChanged();
        return this;
      }
      /**
       * <code>optional string path = 3;</code>
       * @param value The bytes for path to set.
       * @return This builder for chaining.
       */
      public Builder setPathBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        path_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.HdfsPathHandleProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.HdfsPathHandleProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<HdfsPathHandleProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<HdfsPathHandleProto>() {
      @java.lang.Override
      public HdfsPathHandleProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };
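    // PARSER stays public for generated-code compatibility but is marked
    // deprecated; parser() below appears to be the supported accessor (an
    // inference from the annotations, not a statement in the source).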

    public static org.apache.hadoop.thirdparty.protobuf.Parser<HdfsPathHandleProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<HdfsPathHandleProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

  public interface HdfsFileStatusProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.HdfsFileStatusProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;</code>
     * @return Whether the fileType field is set.
     */
    boolean hasFileType();
    /**
     * <code>required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;</code>
     * @return The fileType.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType();

    /**
     * <pre>
     * local name of the inode, encoded in Java UTF-8
     * </pre>
     *
     * <code>required bytes path = 2;</code>
     * @return Whether the path field is set.
     */
    boolean hasPath();
    /**
     * <pre>
     * local name of the inode, encoded in Java UTF-8
     * </pre>
     *
     * <code>required bytes path = 2;</code>
     * @return The path.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getPath();

    /**
     * <code>required uint64 length = 3;</code>
     * @return Whether the length field is set.
     */
    boolean hasLength();
    /**
     * <code>required uint64 length = 3;</code>
     * @return The length.
     */
    long getLength();

    /**
     * <code>required .hadoop.hdfs.FsPermissionProto permission = 4;</code>
     * @return Whether the permission field is set.
     */
    boolean hasPermission();
    /**
     * <code>required .hadoop.hdfs.FsPermissionProto permission = 4;</code>
     * @return The permission.
     */
    org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission();
    /**
     * <code>required .hadoop.hdfs.FsPermissionProto permission = 4;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder();

    /**
     * <code>required string owner = 5;</code>
     * @return Whether the owner field is set.
     */
    boolean hasOwner();
    /**
     * <code>required string owner = 5;</code>
     * @return The owner.
     */
    java.lang.String getOwner();
    /**
     * <code>required string owner = 5;</code>
     * @return The bytes for owner.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getOwnerBytes();

    /**
     * <code>required string group = 6;</code>
     * @return Whether the group field is set.
     */
    boolean hasGroup();
    /**
     * <code>required string group = 6;</code>
     * @return The group.
     */
    java.lang.String getGroup();
    /**
     * <code>required string group = 6;</code>
     * @return The bytes for group.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getGroupBytes();

    /**
     * <code>required uint64 modification_time = 7;</code>
     * @return Whether the modificationTime field is set.
     */
    boolean hasModificationTime();
    /**
     * <code>required uint64 modification_time = 7;</code>
     * @return The modificationTime.
     */
    long getModificationTime();

    /**
     * <code>required uint64 access_time = 8;</code>
     * @return Whether the accessTime field is set.
     */
    boolean hasAccessTime();
    /**
     * <code>required uint64 access_time = 8;</code>
     * @return The accessTime.
     */
    long getAccessTime();

    /**
     * <pre>
     * Optional fields for symlink
     * </pre>
     *
     * <code>optional bytes symlink = 9;</code>
     * @return Whether the symlink field is set.
     */
    boolean hasSymlink();
    /**
     * <pre>
     * Optional fields for symlink
     * </pre>
     *
     * <code>optional bytes symlink = 9;</code>
     * @return The symlink.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getSymlink();

    /**
     * <pre>
     * Optional fields for file
     * </pre>
     *
     * <code>optional uint32 block_replication = 10 [default = 0];</code>
     * @return Whether the blockReplication field is set.
     */
    boolean hasBlockReplication();
    /**
     * <pre>
     * Optional fields for file
     * </pre>
     *
     * <code>optional uint32 block_replication = 10 [default = 0];</code>
     * @return The blockReplication.
     */
    int getBlockReplication();

    /**
     * <code>optional uint64 blocksize = 11 [default = 0];</code>
     * @return Whether the blocksize field is set.
     */
    boolean hasBlocksize();
    /**
     * <code>optional uint64 blocksize = 11 [default = 0];</code>
     * @return The blocksize.
     */
    long getBlocksize();

    /**
     * <pre>
     * supplied only if asked for by the client
     * </pre>
     *
     * <code>optional .hadoop.hdfs.LocatedBlocksProto locations = 12;</code>
     * @return Whether the locations field is set.
     */
    boolean hasLocations();
    /**
     * <pre>
     * supplied only if asked for by the client
     * </pre>
     *
     * <code>optional .hadoop.hdfs.LocatedBlocksProto locations = 12;</code>
     * @return The locations.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations();
    /**
     * <pre>
     * supplied only if asked for by the client
     * </pre>
     *
     * <code>optional .hadoop.hdfs.LocatedBlocksProto locations = 12;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder();

    /**
     * <pre>
     * Optional field for fileId
     * </pre>
     *
     * <code>optional uint64 fileId = 13 [default = 0];</code>
     * @return Whether the fileId field is set.
     */
    boolean hasFileId();
    /**
     * <pre>
     * Optional field for fileId
     * </pre>
     *
     * <code>optional uint64 fileId = 13 [default = 0];</code>
     * @return The fileId.
     */
    long getFileId();

    /**
     * <code>optional int32 childrenNum = 14 [default = -1];</code>
     * @return Whether the childrenNum field is set.
     */
    boolean hasChildrenNum();
    /**
     * <code>optional int32 childrenNum = 14 [default = -1];</code>
     * @return The childrenNum.
     */
    int getChildrenNum();

    /**
     * <pre>
     * Optional field for file encryption
     * </pre>
     *
     * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;</code>
     * @return Whether the fileEncryptionInfo field is set.
     */
    boolean hasFileEncryptionInfo();
    /**
     * <pre>
     * Optional field for file encryption
     * </pre>
     *
     * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;</code>
     * @return The fileEncryptionInfo.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo();
    /**
     * <pre>
     * Optional field for file encryption
     * </pre>
     *
     * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder();

    /**
     * <pre>
     * block storage policy id
     * </pre>
     *
     * <code>optional uint32 storagePolicy = 16 [default = 0];</code>
     * @return Whether the storagePolicy field is set.
     */
    boolean hasStoragePolicy();
    /**
     * <pre>
     * block storage policy id
     * </pre>
     *
     * <code>optional uint32 storagePolicy = 16 [default = 0];</code>
     * @return The storagePolicy.
     */
    int getStoragePolicy();

    /**
     * <pre>
     * Optional field for erasure coding
     * </pre>
     *
     * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;</code>
     * @return Whether the ecPolicy field is set.
     */
    boolean hasEcPolicy();
    /**
     * <pre>
     * Optional field for erasure coding
     * </pre>
     *
     * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;</code>
     * @return The ecPolicy.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy();
    /**
     * <pre>
     * Optional field for erasure coding
     * </pre>
     *
     * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder();

    /**
     * <pre>
     * Set of flags
     * </pre>
     *
     * <code>optional uint32 flags = 18 [default = 0];</code>
     * @return Whether the flags field is set.
     */
    boolean hasFlags();
    /**
     * <pre>
     * Set of flags
     * </pre>
     *
     * <code>optional uint32 flags = 18 [default = 0];</code>
     * @return The flags.
     */
    int getFlags();

    /**
     * <code>optional string namespace = 19;</code>
     * @return Whether the namespace field is set.
     */
    boolean hasNamespace();
    /**
     * <code>optional string namespace = 19;</code>
     * @return The namespace.
     */
    java.lang.String getNamespace();
    /**
     * <code>optional string namespace = 19;</code>
     * @return The bytes for namespace.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getNamespaceBytes();
  }
  /**
   * <pre>
   **
   * Status of a file, directory or symlink
   * Optionally includes a file's block locations if requested by the client on the RPC call.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.HdfsFileStatusProto}
   */
  public static final class HdfsFileStatusProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.HdfsFileStatusProto)
      HdfsFileStatusProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use HdfsFileStatusProto.newBuilder() to construct.
    private HdfsFileStatusProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private HdfsFileStatusProto() {
      fileType_ = 1;
      path_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      owner_ = "";
      group_ = "";
      symlink_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      childrenNum_ = -1;
      namespace_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new HdfsFileStatusProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder.class);
    }

    /**
     * Protobuf enum {@code hadoop.hdfs.HdfsFileStatusProto.FileType}
     */
    public enum FileType
        implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
      /**
       * <code>IS_DIR = 1;</code>
       */
      IS_DIR(1),
      /**
       * <code>IS_FILE = 2;</code>
       */
      IS_FILE(2),
      /**
       * <code>IS_SYMLINK = 3;</code>
       */
      IS_SYMLINK(3),
      ;

      /**
       * <code>IS_DIR = 1;</code>
       */
      public static final int IS_DIR_VALUE = 1;
      /**
       * <code>IS_FILE = 2;</code>
       */
      public static final int IS_FILE_VALUE = 2;
      /**
       * <code>IS_SYMLINK = 3;</code>
       */
      public static final int IS_SYMLINK_VALUE = 3;


      public final int getNumber() {
        return value;
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       * @deprecated Use {@link #forNumber(int)} instead.
       */
      @java.lang.Deprecated
      public static FileType valueOf(int value) {
        return forNumber(value);
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       */
      public static FileType forNumber(int value) {
        switch (value) {
          case 1: return IS_DIR;
          case 2: return IS_FILE;
          case 3: return IS_SYMLINK;
          default: return null;
        }
      }

      public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<FileType>
          internalGetValueMap() {
        return internalValueMap;
      }
      private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
          FileType> internalValueMap =
            new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<FileType>() {
              public FileType findValueByNumber(int number) {
                return FileType.forNumber(number);
              }
            };

      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
          getValueDescriptor() {
        return getDescriptor().getValues().get(ordinal());
      }
      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptorForType() {
        return getDescriptor();
      }
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDescriptor().getEnumTypes().get(0);
      }

      private static final FileType[] VALUES = values();

      public static FileType valueOf(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
        if (desc.getType() != getDescriptor()) {
          throw new java.lang.IllegalArgumentException(
            "EnumValueDescriptor is not for this type.");
        }
        return VALUES[desc.getIndex()];
      }

      private final int value;

      private FileType(int value) {
        this.value = value;
      }

      // @@protoc_insertion_point(enum_scope:hadoop.hdfs.HdfsFileStatusProto.FileType)
    }

    /**
     * Protobuf enum {@code hadoop.hdfs.HdfsFileStatusProto.Flags}
     */
    public enum Flags
        implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
      /**
       * <pre>
       * has ACLs
       * </pre>
       *
       * <code>HAS_ACL = 1;</code>
       */
      HAS_ACL(1),
      /**
       * <pre>
       * encrypted
       * </pre>
       *
       * <code>HAS_CRYPT = 2;</code>
       */
      HAS_CRYPT(2),
      /**
       * <pre>
       * erasure coded
       * </pre>
       *
       * <code>HAS_EC = 4;</code>
       */
      HAS_EC(4),
      /**
       * <pre>
       * snapshot enabled
       * </pre>
       *
       * <code>SNAPSHOT_ENABLED = 8;</code>
       */
      SNAPSHOT_ENABLED(8),
      ;

      /**
       * <pre>
       * has ACLs
       * </pre>
       *
       * <code>HAS_ACL = 1;</code>
       */
      public static final int HAS_ACL_VALUE = 1;
      /**
       * <pre>
       * encrypted
       * </pre>
       *
       * <code>HAS_CRYPT = 2;</code>
       */
      public static final int HAS_CRYPT_VALUE = 2;
      /**
       * <pre>
       * erasure coded
       * </pre>
       *
       * <code>HAS_EC = 4;</code>
       */
      public static final int HAS_EC_VALUE = 4;
      /**
       * <pre>
       * snapshot enabled
       * </pre>
       *
       * <code>SNAPSHOT_ENABLED = 8;</code>
       */
      public static final int SNAPSHOT_ENABLED_VALUE = 8;


      public final int getNumber() {
        return value;
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       * @deprecated Use {@link #forNumber(int)} instead.
       */
      @java.lang.Deprecated
      public static Flags valueOf(int value) {
        return forNumber(value);
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       */
      public static Flags forNumber(int value) {
        switch (value) {
          case 1: return HAS_ACL;
          case 2: return HAS_CRYPT;
          case 4: return HAS_EC;
          case 8: return SNAPSHOT_ENABLED;
          default: return null;
        }
      }

      public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<Flags>
          internalGetValueMap() {
        return internalValueMap;
      }
      private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
          Flags> internalValueMap =
            new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<Flags>() {
              public Flags findValueByNumber(int number) {
                return Flags.forNumber(number);
              }
            };

      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
          getValueDescriptor() {
        return getDescriptor().getValues().get(ordinal());
      }
      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptorForType() {
        return getDescriptor();
      }
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDescriptor().getEnumTypes().get(1);
      }

      private static final Flags[] VALUES = values();

      public static Flags valueOf(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
        if (desc.getType() != getDescriptor()) {
          throw new java.lang.IllegalArgumentException(
            "EnumValueDescriptor is not for this type.");
        }
        return VALUES[desc.getIndex()];
      }

      private final int value;

      private Flags(int value) {
        this.value = value;
      }

      // @@protoc_insertion_point(enum_scope:hadoop.hdfs.HdfsFileStatusProto.Flags)
    }
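    // Flags values are distinct powers of two, so several can be OR-ed into the
    // uint32 flags field (field 18). For example (illustrative only), an
    // encrypted, erasure-coded status would carry
    // HAS_CRYPT_VALUE | HAS_EC_VALUE == 6.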

    private int bitField0_;
    public static final int FILETYPE_FIELD_NUMBER = 1;
    private int fileType_ = 1;
    /**
     * <code>required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;</code>
     * @return Whether the fileType field is set.
     */
    @java.lang.Override public boolean hasFileType() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;</code>
     * @return The fileType.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType() {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.forNumber(fileType_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR : result;
    }
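    // If the stored numeric value does not map to a known FileType constant,
    // getFileType() above falls back to IS_DIR rather than returning null.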

    public static final int PATH_FIELD_NUMBER = 2;
    private org.apache.hadoop.thirdparty.protobuf.ByteString path_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <pre>
     * local name of the inode, encoded in Java UTF-8
     * </pre>
     *
     * <code>required bytes path = 2;</code>
     * @return Whether the path field is set.
     */
    @java.lang.Override
    public boolean hasPath() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <pre>
     * local name of the inode, encoded in Java UTF-8
     * </pre>
     *
     * <code>required bytes path = 2;</code>
     * @return The path.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getPath() {
      return path_;
    }

    public static final int LENGTH_FIELD_NUMBER = 3;
    private long length_ = 0L;
    /**
     * <code>required uint64 length = 3;</code>
     * @return Whether the length field is set.
     */
    @java.lang.Override
    public boolean hasLength() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required uint64 length = 3;</code>
     * @return The length.
     */
    @java.lang.Override
    public long getLength() {
      return length_;
    }

    public static final int PERMISSION_FIELD_NUMBER = 4;
    private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto permission_;
    /**
     * <code>required .hadoop.hdfs.FsPermissionProto permission = 4;</code>
     * @return Whether the permission field is set.
     */
    @java.lang.Override
    public boolean hasPermission() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.FsPermissionProto permission = 4;</code>
     * @return The permission.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission() {
      return permission_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_;
    }
    /**
     * <code>required .hadoop.hdfs.FsPermissionProto permission = 4;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() {
      return permission_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_;
    }

    public static final int OWNER_FIELD_NUMBER = 5;
    @SuppressWarnings("serial")
    private volatile java.lang.Object owner_ = "";
    /**
     * <code>required string owner = 5;</code>
     * @return Whether the owner field is set.
     */
    @java.lang.Override
    public boolean hasOwner() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>required string owner = 5;</code>
     * @return The owner.
     */
    @java.lang.Override
    public java.lang.String getOwner() {
      java.lang.Object ref = owner_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          owner_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string owner = 5;</code>
     * @return The bytes for owner.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getOwnerBytes() {
      java.lang.Object ref = owner_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        owner_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int GROUP_FIELD_NUMBER = 6;
    @SuppressWarnings("serial")
    private volatile java.lang.Object group_ = "";
    /**
     * <code>required string group = 6;</code>
     * @return Whether the group field is set.
     */
    @java.lang.Override
    public boolean hasGroup() {
      return ((bitField0_ & 0x00000020) != 0);
    }
    /**
     * <code>required string group = 6;</code>
     * @return The group.
     */
    @java.lang.Override
    public java.lang.String getGroup() {
      java.lang.Object ref = group_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          group_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string group = 6;</code>
     * @return The bytes for group.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getGroupBytes() {
      java.lang.Object ref = group_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        group_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int MODIFICATION_TIME_FIELD_NUMBER = 7;
    private long modificationTime_ = 0L;
    /**
     * <code>required uint64 modification_time = 7;</code>
     * @return Whether the modificationTime field is set.
     */
    @java.lang.Override
    public boolean hasModificationTime() {
      return ((bitField0_ & 0x00000040) != 0);
    }
    /**
     * <code>required uint64 modification_time = 7;</code>
     * @return The modificationTime.
     */
    @java.lang.Override
    public long getModificationTime() {
      return modificationTime_;
    }

    public static final int ACCESS_TIME_FIELD_NUMBER = 8;
    private long accessTime_ = 0L;
    /**
     * <code>required uint64 access_time = 8;</code>
     * @return Whether the accessTime field is set.
     */
    @java.lang.Override
    public boolean hasAccessTime() {
      return ((bitField0_ & 0x00000080) != 0);
    }
    /**
     * <code>required uint64 access_time = 8;</code>
     * @return The accessTime.
     */
    @java.lang.Override
    public long getAccessTime() {
      return accessTime_;
    }

    public static final int SYMLINK_FIELD_NUMBER = 9;
    private org.apache.hadoop.thirdparty.protobuf.ByteString symlink_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <pre>
     * Optional fields for symlink
     * </pre>
     *
     * <code>optional bytes symlink = 9;</code>
     * @return Whether the symlink field is set.
     */
    @java.lang.Override
    public boolean hasSymlink() {
      return ((bitField0_ & 0x00000100) != 0);
    }
    /**
     * <pre>
     * Optional fields for symlink
     * </pre>
     *
     * <code>optional bytes symlink = 9;</code>
     * @return The symlink.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getSymlink() {
      return symlink_;
    }

    public static final int BLOCK_REPLICATION_FIELD_NUMBER = 10;
    private int blockReplication_ = 0;
    /**
     * <pre>
     * Optional fields for file
     * </pre>
     *
     * <code>optional uint32 block_replication = 10 [default = 0];</code>
     * @return Whether the blockReplication field is set.
     */
    @java.lang.Override
    public boolean hasBlockReplication() {
      return ((bitField0_ & 0x00000200) != 0);
    }
    /**
     * <pre>
     * Optional fields for file
     * </pre>
     *
     * <code>optional uint32 block_replication = 10 [default = 0];</code>
     * @return The blockReplication.
     */
    @java.lang.Override
    public int getBlockReplication() {
      return blockReplication_;
    }

    public static final int BLOCKSIZE_FIELD_NUMBER = 11;
    private long blocksize_ = 0L;
    /**
     * <code>optional uint64 blocksize = 11 [default = 0];</code>
     * @return Whether the blocksize field is set.
     */
    @java.lang.Override
    public boolean hasBlocksize() {
      return ((bitField0_ & 0x00000400) != 0);
    }
    /**
     * <code>optional uint64 blocksize = 11 [default = 0];</code>
     * @return The blocksize.
     */
    @java.lang.Override
    public long getBlocksize() {
      return blocksize_;
    }

    public static final int LOCATIONS_FIELD_NUMBER = 12;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto locations_;
    /**
     * <pre>
     * supplied only if asked for by the client
     * </pre>
     *
     * <code>optional .hadoop.hdfs.LocatedBlocksProto locations = 12;</code>
     * @return Whether the locations field is set.
     */
    @java.lang.Override
    public boolean hasLocations() {
      return ((bitField0_ & 0x00000800) != 0);
    }
    /**
     * <pre>
     * supplied only if asked for by the client
     * </pre>
     *
     * <code>optional .hadoop.hdfs.LocatedBlocksProto locations = 12;</code>
     * @return The locations.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations() {
      return locations_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance() : locations_;
    }
    /**
     * <pre>
     * supplied only if asked for by the client
     * </pre>
     *
     * <code>optional .hadoop.hdfs.LocatedBlocksProto locations = 12;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder() {
      return locations_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance() : locations_;
    }

    public static final int FILEID_FIELD_NUMBER = 13;
    private long fileId_ = 0L;
    /**
     * <pre>
     * Optional field for fileId
     * </pre>
     *
     * <code>optional uint64 fileId = 13 [default = 0];</code>
     * @return Whether the fileId field is set.
     */
    @java.lang.Override
    public boolean hasFileId() {
      return ((bitField0_ & 0x00001000) != 0);
    }
    /**
     * <pre>
     * Optional field for fileId
     * </pre>
     *
     * <code>optional uint64 fileId = 13 [default = 0];</code>
     * @return The fileId.
     */
    @java.lang.Override
    public long getFileId() {
      return fileId_;
    }

    public static final int CHILDRENNUM_FIELD_NUMBER = 14;
    private int childrenNum_ = -1;
    /**
     * <code>optional int32 childrenNum = 14 [default = -1];</code>
     * @return Whether the childrenNum field is set.
     */
    @java.lang.Override
    public boolean hasChildrenNum() {
      return ((bitField0_ & 0x00002000) != 0);
    }
    /**
     * <code>optional int32 childrenNum = 14 [default = -1];</code>
     * @return The childrenNum.
     */
    @java.lang.Override
    public int getChildrenNum() {
      return childrenNum_;
    }

    public static final int FILEENCRYPTIONINFO_FIELD_NUMBER = 15;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_;
    /**
     * <pre>
     * Optional field for file encryption
     * </pre>
     *
     * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;</code>
     * @return Whether the fileEncryptionInfo field is set.
     */
    @java.lang.Override
    public boolean hasFileEncryptionInfo() {
      return ((bitField0_ & 0x00004000) != 0);
    }
    /**
     * <pre>
     * Optional field for file encryption
     * </pre>
     *
     * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;</code>
     * @return The fileEncryptionInfo.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() {
      return fileEncryptionInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_;
    }
    /**
     * <pre>
     * Optional field for file encryption
     * </pre>
     *
     * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() {
      return fileEncryptionInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_;
    }

    public static final int STORAGEPOLICY_FIELD_NUMBER = 16;
    private int storagePolicy_ = 0;
    /**
     * <pre>
     * block storage policy id
     * </pre>
     *
     * <code>optional uint32 storagePolicy = 16 [default = 0];</code>
     * @return Whether the storagePolicy field is set.
     */
    @java.lang.Override
    public boolean hasStoragePolicy() {
      return ((bitField0_ & 0x00008000) != 0);
    }
    /**
     * <pre>
     * block storage policy id
     * </pre>
     *
     * <code>optional uint32 storagePolicy = 16 [default = 0];</code>
     * @return The storagePolicy.
     */
    @java.lang.Override
    public int getStoragePolicy() {
      return storagePolicy_;
    }

    public static final int ECPOLICY_FIELD_NUMBER = 17;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_;
    /**
     * <pre>
     * Optional field for erasure coding
     * </pre>
     *
     * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;</code>
     * @return Whether the ecPolicy field is set.
     */
    @java.lang.Override
    public boolean hasEcPolicy() {
      return ((bitField0_ & 0x00010000) != 0);
    }
    /**
     * <pre>
     * Optional field for erasure coding
     * </pre>
     *
     * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;</code>
     * @return The ecPolicy.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() {
      return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_;
    }
    /**
     * <pre>
     * Optional field for erasure coding
     * </pre>
     *
     * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() {
      return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_;
    }

    public static final int FLAGS_FIELD_NUMBER = 18;
    private int flags_ = 0;
    /**
     * <pre>
     * Set of flags
     * </pre>
     *
     * <code>optional uint32 flags = 18 [default = 0];</code>
     * @return Whether the flags field is set.
     */
    @java.lang.Override
    public boolean hasFlags() {
      return ((bitField0_ & 0x00020000) != 0);
    }
    /**
     * <pre>
     * Set of flags
     * </pre>
     *
     * <code>optional uint32 flags = 18 [default = 0];</code>
     * @return The flags.
     */
    @java.lang.Override
    public int getFlags() {
      return flags_;
    }

    public static final int NAMESPACE_FIELD_NUMBER = 19;
    @SuppressWarnings("serial")
    private volatile java.lang.Object namespace_ = "";
    /**
     * <code>optional string namespace = 19;</code>
     * @return Whether the namespace field is set.
     */
    @java.lang.Override
    public boolean hasNamespace() {
      return ((bitField0_ & 0x00040000) != 0);
    }
    /**
     * <code>optional string namespace = 19;</code>
     * @return The namespace.
     */
    @java.lang.Override
    public java.lang.String getNamespace() {
      java.lang.Object ref = namespace_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          namespace_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string namespace = 19;</code>
     * @return The bytes for namespace.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getNamespaceBytes() {
      java.lang.Object ref = namespace_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        namespace_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasFileType()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasPath()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasLength()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasPermission()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasOwner()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasGroup()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasModificationTime()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasAccessTime()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getPermission().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (hasLocations()) {
        if (!getLocations().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasFileEncryptionInfo()) {
        if (!getFileEncryptionInfo().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasEcPolicy()) {
        if (!getEcPolicy().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }
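    // isInitialized() requires every field declared "required" in hdfs.proto
    // (fileType, path, length, permission, owner, group, modification_time,
    // access_time) and, when present, initialized nested messages
    // (permission, locations, fileEncryptionInfo, ecPolicy).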

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeEnum(1, fileType_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeBytes(2, path_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeUInt64(3, length_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeMessage(4, getPermission());
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 5, owner_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 6, group_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        output.writeUInt64(7, modificationTime_);
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        output.writeUInt64(8, accessTime_);
      }
      if (((bitField0_ & 0x00000100) != 0)) {
        output.writeBytes(9, symlink_);
      }
      if (((bitField0_ & 0x00000200) != 0)) {
        output.writeUInt32(10, blockReplication_);
      }
      if (((bitField0_ & 0x00000400) != 0)) {
        output.writeUInt64(11, blocksize_);
      }
      if (((bitField0_ & 0x00000800) != 0)) {
        output.writeMessage(12, getLocations());
      }
      if (((bitField0_ & 0x00001000) != 0)) {
        output.writeUInt64(13, fileId_);
      }
      if (((bitField0_ & 0x00002000) != 0)) {
        output.writeInt32(14, childrenNum_);
      }
      if (((bitField0_ & 0x00004000) != 0)) {
        output.writeMessage(15, getFileEncryptionInfo());
      }
      if (((bitField0_ & 0x00008000) != 0)) {
        output.writeUInt32(16, storagePolicy_);
      }
      if (((bitField0_ & 0x00010000) != 0)) {
        output.writeMessage(17, getEcPolicy());
      }
      if (((bitField0_ & 0x00020000) != 0)) {
        output.writeUInt32(18, flags_);
      }
      if (((bitField0_ & 0x00040000) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 19, namespace_);
      }
      getUnknownFields().writeTo(output);
    }
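    // writeTo emits only the fields whose presence bit in bitField0_ is set, so
    // unset optional fields contribute nothing to the wire encoding. The first
    // argument of each write*() call is the field number declared in hdfs.proto
    // (1 = fileType ... 19 = namespace); string fields go through
    // GeneratedMessageV3.writeString, which handles the field being cached as
    // either a String or a ByteString.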

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(1, fileType_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(2, path_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(3, length_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(4, getPermission());
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(5, owner_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(6, group_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(7, modificationTime_);
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(8, accessTime_);
      }
      if (((bitField0_ & 0x00000100) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(9, symlink_);
      }
      if (((bitField0_ & 0x00000200) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(10, blockReplication_);
      }
      if (((bitField0_ & 0x00000400) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(11, blocksize_);
      }
      if (((bitField0_ & 0x00000800) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(12, getLocations());
      }
      if (((bitField0_ & 0x00001000) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(13, fileId_);
      }
      if (((bitField0_ & 0x00002000) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt32Size(14, childrenNum_);
      }
      if (((bitField0_ & 0x00004000) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(15, getFileEncryptionInfo());
      }
      if (((bitField0_ & 0x00008000) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(16, storagePolicy_);
      }
      if (((bitField0_ & 0x00010000) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(17, getEcPolicy());
      }
      if (((bitField0_ & 0x00020000) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(18, flags_);
      }
      if (((bitField0_ & 0x00040000) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(19, namespace_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }
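    // getSerializedSize mirrors writeTo field by field and caches the total in
    // memoizedSize (-1 means "not yet computed"), so repeated serialization of
    // the same message instance does not recompute the byte count.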

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto) obj;

      if (hasFileType() != other.hasFileType()) return false;
      if (hasFileType()) {
        if (fileType_ != other.fileType_) return false;
      }
      if (hasPath() != other.hasPath()) return false;
      if (hasPath()) {
        if (!getPath()
            .equals(other.getPath())) return false;
      }
      if (hasLength() != other.hasLength()) return false;
      if (hasLength()) {
        if (getLength()
            != other.getLength()) return false;
      }
      if (hasPermission() != other.hasPermission()) return false;
      if (hasPermission()) {
        if (!getPermission()
            .equals(other.getPermission())) return false;
      }
      if (hasOwner() != other.hasOwner()) return false;
      if (hasOwner()) {
        if (!getOwner()
            .equals(other.getOwner())) return false;
      }
      if (hasGroup() != other.hasGroup()) return false;
      if (hasGroup()) {
        if (!getGroup()
            .equals(other.getGroup())) return false;
      }
      if (hasModificationTime() != other.hasModificationTime()) return false;
      if (hasModificationTime()) {
        if (getModificationTime()
            != other.getModificationTime()) return false;
      }
      if (hasAccessTime() != other.hasAccessTime()) return false;
      if (hasAccessTime()) {
        if (getAccessTime()
            != other.getAccessTime()) return false;
      }
      if (hasSymlink() != other.hasSymlink()) return false;
      if (hasSymlink()) {
        if (!getSymlink()
            .equals(other.getSymlink())) return false;
      }
      if (hasBlockReplication() != other.hasBlockReplication()) return false;
      if (hasBlockReplication()) {
        if (getBlockReplication()
            != other.getBlockReplication()) return false;
      }
      if (hasBlocksize() != other.hasBlocksize()) return false;
      if (hasBlocksize()) {
        if (getBlocksize()
            != other.getBlocksize()) return false;
      }
      if (hasLocations() != other.hasLocations()) return false;
      if (hasLocations()) {
        if (!getLocations()
            .equals(other.getLocations())) return false;
      }
      if (hasFileId() != other.hasFileId()) return false;
      if (hasFileId()) {
        if (getFileId()
            != other.getFileId()) return false;
      }
      if (hasChildrenNum() != other.hasChildrenNum()) return false;
      if (hasChildrenNum()) {
        if (getChildrenNum()
            != other.getChildrenNum()) return false;
      }
      if (hasFileEncryptionInfo() != other.hasFileEncryptionInfo()) return false;
      if (hasFileEncryptionInfo()) {
        if (!getFileEncryptionInfo()
            .equals(other.getFileEncryptionInfo())) return false;
      }
      if (hasStoragePolicy() != other.hasStoragePolicy()) return false;
      if (hasStoragePolicy()) {
        if (getStoragePolicy()
            != other.getStoragePolicy()) return false;
      }
      if (hasEcPolicy() != other.hasEcPolicy()) return false;
      if (hasEcPolicy()) {
        if (!getEcPolicy()
            .equals(other.getEcPolicy())) return false;
      }
      if (hasFlags() != other.hasFlags()) return false;
      if (hasFlags()) {
        if (getFlags()
            != other.getFlags()) return false;
      }
      if (hasNamespace() != other.hasNamespace()) return false;
      if (hasNamespace()) {
        if (!getNamespace()
            .equals(other.getNamespace())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }
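    // Two HdfsFileStatusProto instances are equal only when every field agrees
    // on both presence (hasXxx) and value, and their unknown-field sets match;
    // this is why the generated code compares hasFileType() before fileType_.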

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasFileType()) {
        hash = (37 * hash) + FILETYPE_FIELD_NUMBER;
        hash = (53 * hash) + fileType_;
      }
      if (hasPath()) {
        hash = (37 * hash) + PATH_FIELD_NUMBER;
        hash = (53 * hash) + getPath().hashCode();
      }
      if (hasLength()) {
        hash = (37 * hash) + LENGTH_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getLength());
      }
      if (hasPermission()) {
        hash = (37 * hash) + PERMISSION_FIELD_NUMBER;
        hash = (53 * hash) + getPermission().hashCode();
      }
      if (hasOwner()) {
        hash = (37 * hash) + OWNER_FIELD_NUMBER;
        hash = (53 * hash) + getOwner().hashCode();
      }
      if (hasGroup()) {
        hash = (37 * hash) + GROUP_FIELD_NUMBER;
        hash = (53 * hash) + getGroup().hashCode();
      }
      if (hasModificationTime()) {
        hash = (37 * hash) + MODIFICATION_TIME_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getModificationTime());
      }
      if (hasAccessTime()) {
        hash = (37 * hash) + ACCESS_TIME_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getAccessTime());
      }
      if (hasSymlink()) {
        hash = (37 * hash) + SYMLINK_FIELD_NUMBER;
        hash = (53 * hash) + getSymlink().hashCode();
      }
      if (hasBlockReplication()) {
        hash = (37 * hash) + BLOCK_REPLICATION_FIELD_NUMBER;
        hash = (53 * hash) + getBlockReplication();
      }
      if (hasBlocksize()) {
        hash = (37 * hash) + BLOCKSIZE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getBlocksize());
      }
      if (hasLocations()) {
        hash = (37 * hash) + LOCATIONS_FIELD_NUMBER;
        hash = (53 * hash) + getLocations().hashCode();
      }
      if (hasFileId()) {
        hash = (37 * hash) + FILEID_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getFileId());
      }
      if (hasChildrenNum()) {
        hash = (37 * hash) + CHILDRENNUM_FIELD_NUMBER;
        hash = (53 * hash) + getChildrenNum();
      }
      if (hasFileEncryptionInfo()) {
        hash = (37 * hash) + FILEENCRYPTIONINFO_FIELD_NUMBER;
        hash = (53 * hash) + getFileEncryptionInfo().hashCode();
      }
      if (hasStoragePolicy()) {
        hash = (37 * hash) + STORAGEPOLICY_FIELD_NUMBER;
        hash = (53 * hash) + getStoragePolicy();
      }
      if (hasEcPolicy()) {
        hash = (37 * hash) + ECPOLICY_FIELD_NUMBER;
        hash = (53 * hash) + getEcPolicy().hashCode();
      }
      if (hasFlags()) {
        hash = (37 * hash) + FLAGS_FIELD_NUMBER;
        hash = (53 * hash) + getFlags();
      }
      if (hasNamespace()) {
        hash = (37 * hash) + NAMESPACE_FIELD_NUMBER;
        hash = (53 * hash) + getNamespace().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
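    // hashCode mixes the descriptor hash with (field number, value) pairs for
    // every set field, then caches the result in memoizedHashCode; 0 is treated
    // as "not yet computed", matching the check at the top of the method.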

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
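    // Usage sketch (illustrative only; the variable names are not part of this
    // API): the overloads above all delegate to PARSER, so a caller holding
    // serialized bytes can round-trip a status record roughly like this:
    //
    //   byte[] data = ...;                                  // e.g. an RPC payload
    //   HdfsProtos.HdfsFileStatusProto status =
    //       HdfsProtos.HdfsFileStatusProto.parseFrom(data);
    //   long length = status.hasLength() ? status.getLength() : 0L;
    //   byte[] bytes = status.toByteArray();                // re-serialize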

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }
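    // Builder usage sketch (illustrative values; assumes the FileType enum and
    // the AclProtos.FsPermissionProto accessors generated elsewhere in this
    // package):
    //
    //   HdfsProtos.HdfsFileStatusProto status =
    //       HdfsProtos.HdfsFileStatusProto.newBuilder()
    //           .setFileType(HdfsProtos.HdfsFileStatusProto.FileType.IS_FILE)
    //           .setPath(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("foo"))
    //           .setLength(0L)
    //           .setPermission(AclProtos.FsPermissionProto.newBuilder().setPerm(0644).build())
    //           .setOwner("hdfs")
    //           .setGroup("supergroup")
    //           .setModificationTime(0L)
    //           .setAccessTime(0L)
    //           .build();   // build() throws if any required field had been omitted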

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Status of a file, directory or symlink
     * Optionally includes a file's block locations if requested by the client on the RPC call.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.HdfsFileStatusProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.HdfsFileStatusProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getPermissionFieldBuilder();
          getLocationsFieldBuilder();
          getFileEncryptionInfoFieldBuilder();
          getEcPolicyFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        fileType_ = 1;
        path_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        length_ = 0L;
        permission_ = null;
        if (permissionBuilder_ != null) {
          permissionBuilder_.dispose();
          permissionBuilder_ = null;
        }
        owner_ = "";
        group_ = "";
        modificationTime_ = 0L;
        accessTime_ = 0L;
        symlink_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        blockReplication_ = 0;
        blocksize_ = 0L;
        locations_ = null;
        if (locationsBuilder_ != null) {
          locationsBuilder_.dispose();
          locationsBuilder_ = null;
        }
        fileId_ = 0L;
        childrenNum_ = -1;
        fileEncryptionInfo_ = null;
        if (fileEncryptionInfoBuilder_ != null) {
          fileEncryptionInfoBuilder_.dispose();
          fileEncryptionInfoBuilder_ = null;
        }
        storagePolicy_ = 0;
        ecPolicy_ = null;
        if (ecPolicyBuilder_ != null) {
          ecPolicyBuilder_.dispose();
          ecPolicyBuilder_ = null;
        }
        flags_ = 0;
        namespace_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.fileType_ = fileType_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.path_ = path_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.length_ = length_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.permission_ = permissionBuilder_ == null
              ? permission_
              : permissionBuilder_.build();
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.owner_ = owner_;
          to_bitField0_ |= 0x00000010;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.group_ = group_;
          to_bitField0_ |= 0x00000020;
        }
        if (((from_bitField0_ & 0x00000040) != 0)) {
          result.modificationTime_ = modificationTime_;
          to_bitField0_ |= 0x00000040;
        }
        if (((from_bitField0_ & 0x00000080) != 0)) {
          result.accessTime_ = accessTime_;
          to_bitField0_ |= 0x00000080;
        }
        if (((from_bitField0_ & 0x00000100) != 0)) {
          result.symlink_ = symlink_;
          to_bitField0_ |= 0x00000100;
        }
        if (((from_bitField0_ & 0x00000200) != 0)) {
          result.blockReplication_ = blockReplication_;
          to_bitField0_ |= 0x00000200;
        }
        if (((from_bitField0_ & 0x00000400) != 0)) {
          result.blocksize_ = blocksize_;
          to_bitField0_ |= 0x00000400;
        }
        if (((from_bitField0_ & 0x00000800) != 0)) {
          result.locations_ = locationsBuilder_ == null
              ? locations_
              : locationsBuilder_.build();
          to_bitField0_ |= 0x00000800;
        }
        if (((from_bitField0_ & 0x00001000) != 0)) {
          result.fileId_ = fileId_;
          to_bitField0_ |= 0x00001000;
        }
        if (((from_bitField0_ & 0x00002000) != 0)) {
          result.childrenNum_ = childrenNum_;
          to_bitField0_ |= 0x00002000;
        }
        if (((from_bitField0_ & 0x00004000) != 0)) {
          result.fileEncryptionInfo_ = fileEncryptionInfoBuilder_ == null
              ? fileEncryptionInfo_
              : fileEncryptionInfoBuilder_.build();
          to_bitField0_ |= 0x00004000;
        }
        if (((from_bitField0_ & 0x00008000) != 0)) {
          result.storagePolicy_ = storagePolicy_;
          to_bitField0_ |= 0x00008000;
        }
        if (((from_bitField0_ & 0x00010000) != 0)) {
          result.ecPolicy_ = ecPolicyBuilder_ == null
              ? ecPolicy_
              : ecPolicyBuilder_.build();
          to_bitField0_ |= 0x00010000;
        }
        if (((from_bitField0_ & 0x00020000) != 0)) {
          result.flags_ = flags_;
          to_bitField0_ |= 0x00020000;
        }
        if (((from_bitField0_ & 0x00040000) != 0)) {
          result.namespace_ = namespace_;
          to_bitField0_ |= 0x00040000;
        }
        result.bitField0_ |= to_bitField0_;
      }
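      // buildPartial0 copies each set builder field into the result message and
      // rebuilds result.bitField0_ from the same presence bits, so hasXxx() on
      // the built message reflects exactly what was set on the builder. Message
      // sub-fields come from the field builder when one exists, otherwise from
      // the cached value.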

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) return this;
        if (other.hasFileType()) {
          setFileType(other.getFileType());
        }
        if (other.hasPath()) {
          setPath(other.getPath());
        }
        if (other.hasLength()) {
          setLength(other.getLength());
        }
        if (other.hasPermission()) {
          mergePermission(other.getPermission());
        }
        if (other.hasOwner()) {
          owner_ = other.owner_;
          bitField0_ |= 0x00000010;
          onChanged();
        }
        if (other.hasGroup()) {
          group_ = other.group_;
          bitField0_ |= 0x00000020;
          onChanged();
        }
        if (other.hasModificationTime()) {
          setModificationTime(other.getModificationTime());
        }
        if (other.hasAccessTime()) {
          setAccessTime(other.getAccessTime());
        }
        if (other.hasSymlink()) {
          setSymlink(other.getSymlink());
        }
        if (other.hasBlockReplication()) {
          setBlockReplication(other.getBlockReplication());
        }
        if (other.hasBlocksize()) {
          setBlocksize(other.getBlocksize());
        }
        if (other.hasLocations()) {
          mergeLocations(other.getLocations());
        }
        if (other.hasFileId()) {
          setFileId(other.getFileId());
        }
        if (other.hasChildrenNum()) {
          setChildrenNum(other.getChildrenNum());
        }
        if (other.hasFileEncryptionInfo()) {
          mergeFileEncryptionInfo(other.getFileEncryptionInfo());
        }
        if (other.hasStoragePolicy()) {
          setStoragePolicy(other.getStoragePolicy());
        }
        if (other.hasEcPolicy()) {
          mergeEcPolicy(other.getEcPolicy());
        }
        if (other.hasFlags()) {
          setFlags(other.getFlags());
        }
        if (other.hasNamespace()) {
          namespace_ = other.namespace_;
          bitField0_ |= 0x00040000;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasFileType()) {
          return false;
        }
        if (!hasPath()) {
          return false;
        }
        if (!hasLength()) {
          return false;
        }
        if (!hasPermission()) {
          return false;
        }
        if (!hasOwner()) {
          return false;
        }
        if (!hasGroup()) {
          return false;
        }
        if (!hasModificationTime()) {
          return false;
        }
        if (!hasAccessTime()) {
          return false;
        }
        if (!getPermission().isInitialized()) {
          return false;
        }
        if (hasLocations()) {
          if (!getLocations().isInitialized()) {
            return false;
          }
        }
        if (hasFileEncryptionInfo()) {
          if (!getFileEncryptionInfo().isInitialized()) {
            return false;
          }
        }
        if (hasEcPolicy()) {
          if (!getEcPolicy().isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(1, tmpRaw);
                } else {
                  fileType_ = tmpRaw;
                  bitField0_ |= 0x00000001;
                }
                break;
              } // case 8
              case 18: {
                path_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 24: {
                length_ = input.readUInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 34: {
                input.readMessage(
                    getPermissionFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000008;
                break;
              } // case 34
              case 42: {
                owner_ = input.readBytes();
                bitField0_ |= 0x00000010;
                break;
              } // case 42
              case 50: {
                group_ = input.readBytes();
                bitField0_ |= 0x00000020;
                break;
              } // case 50
              case 56: {
                modificationTime_ = input.readUInt64();
                bitField0_ |= 0x00000040;
                break;
              } // case 56
              case 64: {
                accessTime_ = input.readUInt64();
                bitField0_ |= 0x00000080;
                break;
              } // case 64
              case 74: {
                symlink_ = input.readBytes();
                bitField0_ |= 0x00000100;
                break;
              } // case 74
              case 80: {
                blockReplication_ = input.readUInt32();
                bitField0_ |= 0x00000200;
                break;
              } // case 80
              case 88: {
                blocksize_ = input.readUInt64();
                bitField0_ |= 0x00000400;
                break;
              } // case 88
              case 98: {
                input.readMessage(
                    getLocationsFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000800;
                break;
              } // case 98
              case 104: {
                fileId_ = input.readUInt64();
                bitField0_ |= 0x00001000;
                break;
              } // case 104
              case 112: {
                childrenNum_ = input.readInt32();
                bitField0_ |= 0x00002000;
                break;
              } // case 112
              case 122: {
                input.readMessage(
                    getFileEncryptionInfoFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00004000;
                break;
              } // case 122
              case 128: {
                storagePolicy_ = input.readUInt32();
                bitField0_ |= 0x00008000;
                break;
              } // case 128
              case 138: {
                input.readMessage(
                    getEcPolicyFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00010000;
                break;
              } // case 138
              case 144: {
                flags_ = input.readUInt32();
                bitField0_ |= 0x00020000;
                break;
              } // case 144
              case 154: {
                namespace_ = input.readBytes();
                bitField0_ |= 0x00040000;
                break;
              } // case 154
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
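      // The case labels in the switch above are protobuf wire tags,
      // (field_number << 3) | wire_type: case 8 is field 1 as a varint
      // (1 << 3 | 0), case 18 is field 2 length-delimited (2 << 3 | 2), and
      // case 154 is field 19 (namespace) length-delimited (19 << 3 | 2).
      // Unrecognized tags fall through to parseUnknownField so forward
      // compatibility is preserved.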
      private int bitField0_;

      private int fileType_ = 1;
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;</code>
       * @return Whether the fileType field is set.
       */
      @java.lang.Override public boolean hasFileType() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;</code>
       * @return The fileType.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType getFileType() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.forNumber(fileType_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType.IS_DIR : result;
      }
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;</code>
       * @param value The fileType to set.
       * @return This builder for chaining.
       */
      public Builder setFileType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        fileType_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto.FileType fileType = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearFileType() {
        bitField0_ = (bitField0_ & ~0x00000001);
        fileType_ = 1;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString path_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <pre>
       * local name of the inode, encoded in Java UTF-8
       * </pre>
       *
       * <code>required bytes path = 2;</code>
       * @return Whether the path field is set.
       */
      @java.lang.Override
      public boolean hasPath() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       * local name of the inode, encoded in Java UTF-8
       * </pre>
       *
       * <code>required bytes path = 2;</code>
       * @return The path.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getPath() {
        return path_;
      }
      /**
       * <pre>
       * local name of the inode, encoded in Java UTF-8
       * </pre>
       *
       * <code>required bytes path = 2;</code>
       * @param value The path to set.
       * @return This builder for chaining.
       */
      public Builder setPath(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        path_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * local name of the inode, encoded in Java UTF-8
       * </pre>
       *
       * <code>required bytes path = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearPath() {
        bitField0_ = (bitField0_ & ~0x00000002);
        path_ = getDefaultInstance().getPath();
        onChanged();
        return this;
      }

      private long length_ ;
      /**
       * <code>required uint64 length = 3;</code>
       * @return Whether the length field is set.
       */
      @java.lang.Override
      public boolean hasLength() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required uint64 length = 3;</code>
       * @return The length.
       */
      @java.lang.Override
      public long getLength() {
        return length_;
      }
      /**
       * <code>required uint64 length = 3;</code>
       * @param value The length to set.
       * @return This builder for chaining.
       */
      public Builder setLength(long value) {

        length_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 length = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearLength() {
        bitField0_ = (bitField0_ & ~0x00000004);
        length_ = 0L;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto permission_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> permissionBuilder_;
      /**
       * <code>required .hadoop.hdfs.FsPermissionProto permission = 4;</code>
       * @return Whether the permission field is set.
       */
      public boolean hasPermission() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.FsPermissionProto permission = 4;</code>
       * @return The permission.
       */
      public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission() {
        if (permissionBuilder_ == null) {
          return permission_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_;
        } else {
          return permissionBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.FsPermissionProto permission = 4;</code>
       */
      public Builder setPermission(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) {
        if (permissionBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          permission_ = value;
        } else {
          permissionBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.FsPermissionProto permission = 4;</code>
       */
      public Builder setPermission(
          org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder builderForValue) {
        if (permissionBuilder_ == null) {
          permission_ = builderForValue.build();
        } else {
          permissionBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.FsPermissionProto permission = 4;</code>
       */
      public Builder mergePermission(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) {
        if (permissionBuilder_ == null) {
          if (((bitField0_ & 0x00000008) != 0) &&
            permission_ != null &&
            permission_ != org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance()) {
            getPermissionBuilder().mergeFrom(value);
          } else {
            permission_ = value;
          }
        } else {
          permissionBuilder_.mergeFrom(value);
        }
        if (permission_ != null) {
          bitField0_ |= 0x00000008;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.FsPermissionProto permission = 4;</code>
       */
      public Builder clearPermission() {
        bitField0_ = (bitField0_ & ~0x00000008);
        permission_ = null;
        if (permissionBuilder_ != null) {
          permissionBuilder_.dispose();
          permissionBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.FsPermissionProto permission = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder getPermissionBuilder() {
        bitField0_ |= 0x00000008;
        onChanged();
        return getPermissionFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.FsPermissionProto permission = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() {
        if (permissionBuilder_ != null) {
          return permissionBuilder_.getMessageOrBuilder();
        } else {
          return permission_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.FsPermissionProto permission = 4;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> 
          getPermissionFieldBuilder() {
        if (permissionBuilder_ == null) {
          permissionBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder>(
                  getPermission(),
                  getParentForChildren(),
                  isClean());
          permission_ = null;
        }
        return permissionBuilder_;
      }
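      // Lazy single-field builder pattern: the SingleFieldBuilderV3 is created
      // on first use, seeded with the current permission_ value, and from then
      // on the builder (not permission_) is the source of truth, which is why
      // permission_ is nulled out here.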

      private java.lang.Object owner_ = "";
      /**
       * <code>required string owner = 5;</code>
       * @return Whether the owner field is set.
       */
      public boolean hasOwner() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>required string owner = 5;</code>
       * @return The owner.
       */
      public java.lang.String getOwner() {
        java.lang.Object ref = owner_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            owner_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string owner = 5;</code>
       * @return The bytes for owner.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getOwnerBytes() {
        java.lang.Object ref = owner_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          owner_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string owner = 5;</code>
       * @param value The owner to set.
       * @return This builder for chaining.
       */
      public Builder setOwner(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        owner_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>required string owner = 5;</code>
       * @return This builder for chaining.
       */
      public Builder clearOwner() {
        owner_ = getDefaultInstance().getOwner();
        bitField0_ = (bitField0_ & ~0x00000010);
        onChanged();
        return this;
      }
      /**
       * <code>required string owner = 5;</code>
       * @param value The bytes for owner to set.
       * @return This builder for chaining.
       */
      public Builder setOwnerBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        owner_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }

      private java.lang.Object group_ = "";
      /**
       * <code>required string group = 6;</code>
       * @return Whether the group field is set.
       */
      public boolean hasGroup() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>required string group = 6;</code>
       * @return The group.
       */
      public java.lang.String getGroup() {
        java.lang.Object ref = group_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            group_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string group = 6;</code>
       * @return The bytes for group.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getGroupBytes() {
        java.lang.Object ref = group_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          group_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string group = 6;</code>
       * @param value The group to set.
       * @return This builder for chaining.
       */
      public Builder setGroup(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        group_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <code>required string group = 6;</code>
       * @return This builder for chaining.
       */
      public Builder clearGroup() {
        group_ = getDefaultInstance().getGroup();
        bitField0_ = (bitField0_ & ~0x00000020);
        onChanged();
        return this;
      }
      /**
       * <code>required string group = 6;</code>
       * @param value The bytes for group to set.
       * @return This builder for chaining.
       */
      public Builder setGroupBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        group_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }

      private long modificationTime_ ;
      /**
       * <code>required uint64 modification_time = 7;</code>
       * @return Whether the modificationTime field is set.
       */
      @java.lang.Override
      public boolean hasModificationTime() {
        return ((bitField0_ & 0x00000040) != 0);
      }
      /**
       * <code>required uint64 modification_time = 7;</code>
       * @return The modificationTime.
       */
      @java.lang.Override
      public long getModificationTime() {
        return modificationTime_;
      }
      /**
       * <code>required uint64 modification_time = 7;</code>
       * @param value The modificationTime to set.
       * @return This builder for chaining.
       */
      public Builder setModificationTime(long value) {

        modificationTime_ = value;
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 modification_time = 7;</code>
       * @return This builder for chaining.
       */
      public Builder clearModificationTime() {
        bitField0_ = (bitField0_ & ~0x00000040);
        modificationTime_ = 0L;
        onChanged();
        return this;
      }

      private long accessTime_ ;
      /**
       * <code>required uint64 access_time = 8;</code>
       * @return Whether the accessTime field is set.
       */
      @java.lang.Override
      public boolean hasAccessTime() {
        return ((bitField0_ & 0x00000080) != 0);
      }
      /**
       * <code>required uint64 access_time = 8;</code>
       * @return The accessTime.
       */
      @java.lang.Override
      public long getAccessTime() {
        return accessTime_;
      }
      /**
       * <code>required uint64 access_time = 8;</code>
       * @param value The accessTime to set.
       * @return This builder for chaining.
       */
      public Builder setAccessTime(long value) {

        accessTime_ = value;
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 access_time = 8;</code>
       * @return This builder for chaining.
       */
      public Builder clearAccessTime() {
        bitField0_ = (bitField0_ & ~0x00000080);
        accessTime_ = 0L;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString symlink_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <pre>
       * Optional fields for symlink
       * </pre>
       *
       * <code>optional bytes symlink = 9;</code>
       * @return Whether the symlink field is set.
       */
      @java.lang.Override
      public boolean hasSymlink() {
        return ((bitField0_ & 0x00000100) != 0);
      }
      /**
       * <pre>
       * Optional fields for symlink
       * </pre>
       *
       * <code>optional bytes symlink = 9;</code>
       * @return The symlink.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getSymlink() {
        return symlink_;
      }
      /**
       * <pre>
       * Optional fields for symlink
       * </pre>
       *
       * <code>optional bytes symlink = 9;</code>
       * @param value The symlink to set.
       * @return This builder for chaining.
       */
      public Builder setSymlink(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        symlink_ = value;
        bitField0_ |= 0x00000100;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Optional fields for symlink
       * </pre>
       *
       * <code>optional bytes symlink = 9;</code>
       * @return This builder for chaining.
       */
      public Builder clearSymlink() {
        bitField0_ = (bitField0_ & ~0x00000100);
        symlink_ = getDefaultInstance().getSymlink();
        onChanged();
        return this;
      }

      private int blockReplication_ ;
      /**
       * <pre>
       * Optional fields for file
       * </pre>
       *
       * <code>optional uint32 block_replication = 10 [default = 0];</code>
       * @return Whether the blockReplication field is set.
       */
      @java.lang.Override
      public boolean hasBlockReplication() {
        return ((bitField0_ & 0x00000200) != 0);
      }
      /**
       * <pre>
       * Optional fields for file
       * </pre>
       *
       * <code>optional uint32 block_replication = 10 [default = 0];</code>
       * @return The blockReplication.
       */
      @java.lang.Override
      public int getBlockReplication() {
        return blockReplication_;
      }
      /**
       * <pre>
       * Optional fields for file
       * </pre>
       *
       * <code>optional uint32 block_replication = 10 [default = 0];</code>
       * @param value The blockReplication to set.
       * @return This builder for chaining.
       */
      public Builder setBlockReplication(int value) {

        blockReplication_ = value;
        bitField0_ |= 0x00000200;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Optional fields for file
       * </pre>
       *
       * <code>optional uint32 block_replication = 10 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearBlockReplication() {
        bitField0_ = (bitField0_ & ~0x00000200);
        blockReplication_ = 0;
        onChanged();
        return this;
      }

      private long blocksize_ ;
      /**
       * <code>optional uint64 blocksize = 11 [default = 0];</code>
       * @return Whether the blocksize field is set.
       */
      @java.lang.Override
      public boolean hasBlocksize() {
        return ((bitField0_ & 0x00000400) != 0);
      }
      /**
       * <code>optional uint64 blocksize = 11 [default = 0];</code>
       * @return The blocksize.
       */
      @java.lang.Override
      public long getBlocksize() {
        return blocksize_;
      }
      /**
       * <code>optional uint64 blocksize = 11 [default = 0];</code>
       * @param value The blocksize to set.
       * @return This builder for chaining.
       */
      public Builder setBlocksize(long value) {

        blocksize_ = value;
        bitField0_ |= 0x00000400;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 blocksize = 11 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearBlocksize() {
        bitField0_ = (bitField0_ & ~0x00000400);
        blocksize_ = 0L;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto locations_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder> locationsBuilder_;
      /**
       * <pre>
        * supplied only if asked by client
       * </pre>
       *
       * <code>optional .hadoop.hdfs.LocatedBlocksProto locations = 12;</code>
       * @return Whether the locations field is set.
       */
      public boolean hasLocations() {
        return ((bitField0_ & 0x00000800) != 0);
      }
      /**
       * <pre>
        * supplied only if asked by client
       * </pre>
       *
       * <code>optional .hadoop.hdfs.LocatedBlocksProto locations = 12;</code>
       * @return The locations.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto getLocations() {
        if (locationsBuilder_ == null) {
          return locations_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance() : locations_;
        } else {
          return locationsBuilder_.getMessage();
        }
      }
      /**
       * <pre>
        * supplied only if asked by client
       * </pre>
       *
       * <code>optional .hadoop.hdfs.LocatedBlocksProto locations = 12;</code>
       */
      public Builder setLocations(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto value) {
        if (locationsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          locations_ = value;
        } else {
          locationsBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000800;
        onChanged();
        return this;
      }
      /**
       * <pre>
        * supplied only if asked by client
       * </pre>
       *
       * <code>optional .hadoop.hdfs.LocatedBlocksProto locations = 12;</code>
       */
      public Builder setLocations(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder builderForValue) {
        if (locationsBuilder_ == null) {
          locations_ = builderForValue.build();
        } else {
          locationsBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000800;
        onChanged();
        return this;
      }
      /**
       * <pre>
        * supplied only if asked by client
       * </pre>
       *
       * <code>optional .hadoop.hdfs.LocatedBlocksProto locations = 12;</code>
       */
      public Builder mergeLocations(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto value) {
        if (locationsBuilder_ == null) {
          if (((bitField0_ & 0x00000800) != 0) &&
            locations_ != null &&
            locations_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance()) {
            getLocationsBuilder().mergeFrom(value);
          } else {
            locations_ = value;
          }
        } else {
          locationsBuilder_.mergeFrom(value);
        }
        if (locations_ != null) {
          bitField0_ |= 0x00000800;
          onChanged();
        }
        return this;
      }
      /**
       * <pre>
        * supplied only if asked by client
       * </pre>
       *
       * <code>optional .hadoop.hdfs.LocatedBlocksProto locations = 12;</code>
       */
      public Builder clearLocations() {
        bitField0_ = (bitField0_ & ~0x00000800);
        locations_ = null;
        if (locationsBuilder_ != null) {
          locationsBuilder_.dispose();
          locationsBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <pre>
        * supplied only if asked by client
       * </pre>
       *
       * <code>optional .hadoop.hdfs.LocatedBlocksProto locations = 12;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder getLocationsBuilder() {
        bitField0_ |= 0x00000800;
        onChanged();
        return getLocationsFieldBuilder().getBuilder();
      }
      /**
       * <pre>
        * supplied only if asked by client
       * </pre>
       *
       * <code>optional .hadoop.hdfs.LocatedBlocksProto locations = 12;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder getLocationsOrBuilder() {
        if (locationsBuilder_ != null) {
          return locationsBuilder_.getMessageOrBuilder();
        } else {
          return locations_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.getDefaultInstance() : locations_;
        }
      }
      /**
       * <pre>
        * supplied only if asked by client
       * </pre>
       *
       * <code>optional .hadoop.hdfs.LocatedBlocksProto locations = 12;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder> 
          getLocationsFieldBuilder() {
        if (locationsBuilder_ == null) {
          locationsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProtoOrBuilder>(
                  getLocations(),
                  getParentForChildren(),
                  isClean());
          locations_ = null;
        }
        return locationsBuilder_;
      }
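      // Editorial note: locationsBuilder_ is created lazily on first use; once it
      // exists it becomes the single source of truth for the nested message and
      // the plain locations_ reference is nulled out, so getLocations() and
      // getLocationsOrBuilder() above read through the field builder instead.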

      private long fileId_ ;
      /**
       * <pre>
       * Optional field for fileId
       * </pre>
       *
       * <code>optional uint64 fileId = 13 [default = 0];</code>
       * @return Whether the fileId field is set.
       */
      @java.lang.Override
      public boolean hasFileId() {
        return ((bitField0_ & 0x00001000) != 0);
      }
      /**
       * <pre>
       * Optional field for fileId
       * </pre>
       *
       * <code>optional uint64 fileId = 13 [default = 0];</code>
       * @return The fileId.
       */
      @java.lang.Override
      public long getFileId() {
        return fileId_;
      }
      /**
       * <pre>
       * Optional field for fileId
       * </pre>
       *
       * <code>optional uint64 fileId = 13 [default = 0];</code>
       * @param value The fileId to set.
       * @return This builder for chaining.
       */
      public Builder setFileId(long value) {

        fileId_ = value;
        bitField0_ |= 0x00001000;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Optional field for fileId
       * </pre>
       *
       * <code>optional uint64 fileId = 13 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearFileId() {
        bitField0_ = (bitField0_ & ~0x00001000);
        fileId_ = 0L;
        onChanged();
        return this;
      }

      private int childrenNum_ = -1;
      /**
       * <code>optional int32 childrenNum = 14 [default = -1];</code>
       * @return Whether the childrenNum field is set.
       */
      @java.lang.Override
      public boolean hasChildrenNum() {
        return ((bitField0_ & 0x00002000) != 0);
      }
      /**
       * <code>optional int32 childrenNum = 14 [default = -1];</code>
       * @return The childrenNum.
       */
      @java.lang.Override
      public int getChildrenNum() {
        return childrenNum_;
      }
      /**
       * <code>optional int32 childrenNum = 14 [default = -1];</code>
       * @param value The childrenNum to set.
       * @return This builder for chaining.
       */
      public Builder setChildrenNum(int value) {

        childrenNum_ = value;
        bitField0_ |= 0x00002000;
        onChanged();
        return this;
      }
      /**
       * <code>optional int32 childrenNum = 14 [default = -1];</code>
       * @return This builder for chaining.
       */
      public Builder clearChildrenNum() {
        bitField0_ = (bitField0_ & ~0x00002000);
        childrenNum_ = -1;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto fileEncryptionInfo_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder> fileEncryptionInfoBuilder_;
      /**
       * <pre>
       * Optional field for file encryption
       * </pre>
       *
       * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;</code>
       * @return Whether the fileEncryptionInfo field is set.
       */
      public boolean hasFileEncryptionInfo() {
        return ((bitField0_ & 0x00004000) != 0);
      }
      /**
       * <pre>
       * Optional field for file encryption
       * </pre>
       *
       * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;</code>
       * @return The fileEncryptionInfo.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto getFileEncryptionInfo() {
        if (fileEncryptionInfoBuilder_ == null) {
          return fileEncryptionInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_;
        } else {
          return fileEncryptionInfoBuilder_.getMessage();
        }
      }
      /**
       * <pre>
       * Optional field for file encryption
       * </pre>
       *
       * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;</code>
       */
      public Builder setFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) {
        if (fileEncryptionInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          fileEncryptionInfo_ = value;
        } else {
          fileEncryptionInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00004000;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Optional field for file encryption
       * </pre>
       *
       * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;</code>
       */
      public Builder setFileEncryptionInfo(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder builderForValue) {
        if (fileEncryptionInfoBuilder_ == null) {
          fileEncryptionInfo_ = builderForValue.build();
        } else {
          fileEncryptionInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00004000;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Optional field for file encryption
       * </pre>
       *
       * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;</code>
       */
      public Builder mergeFileEncryptionInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto value) {
        if (fileEncryptionInfoBuilder_ == null) {
          if (((bitField0_ & 0x00004000) != 0) &&
            fileEncryptionInfo_ != null &&
            fileEncryptionInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance()) {
            getFileEncryptionInfoBuilder().mergeFrom(value);
          } else {
            fileEncryptionInfo_ = value;
          }
        } else {
          fileEncryptionInfoBuilder_.mergeFrom(value);
        }
        if (fileEncryptionInfo_ != null) {
          bitField0_ |= 0x00004000;
          onChanged();
        }
        return this;
      }
      /**
       * <pre>
       * Optional field for file encryption
       * </pre>
       *
       * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;</code>
       */
      public Builder clearFileEncryptionInfo() {
        bitField0_ = (bitField0_ & ~0x00004000);
        fileEncryptionInfo_ = null;
        if (fileEncryptionInfoBuilder_ != null) {
          fileEncryptionInfoBuilder_.dispose();
          fileEncryptionInfoBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Optional field for file encryption
       * </pre>
       *
       * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder getFileEncryptionInfoBuilder() {
        bitField0_ |= 0x00004000;
        onChanged();
        return getFileEncryptionInfoFieldBuilder().getBuilder();
      }
      /**
       * <pre>
       * Optional field for file encryption
       * </pre>
       *
       * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder getFileEncryptionInfoOrBuilder() {
        if (fileEncryptionInfoBuilder_ != null) {
          return fileEncryptionInfoBuilder_.getMessageOrBuilder();
        } else {
          return fileEncryptionInfo_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.getDefaultInstance() : fileEncryptionInfo_;
        }
      }
      /**
       * <pre>
       * Optional field for file encryption
       * </pre>
       *
       * <code>optional .hadoop.hdfs.FileEncryptionInfoProto fileEncryptionInfo = 15;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder> 
          getFileEncryptionInfoFieldBuilder() {
        if (fileEncryptionInfoBuilder_ == null) {
          fileEncryptionInfoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProtoOrBuilder>(
                  getFileEncryptionInfo(),
                  getParentForChildren(),
                  isClean());
          fileEncryptionInfo_ = null;
        }
        return fileEncryptionInfoBuilder_;
      }

      private int storagePolicy_ ;
      /**
       * <pre>
       * block storage policy id
       * </pre>
       *
       * <code>optional uint32 storagePolicy = 16 [default = 0];</code>
       * @return Whether the storagePolicy field is set.
       */
      @java.lang.Override
      public boolean hasStoragePolicy() {
        return ((bitField0_ & 0x00008000) != 0);
      }
      /**
       * <pre>
       * block storage policy id
       * </pre>
       *
       * <code>optional uint32 storagePolicy = 16 [default = 0];</code>
       * @return The storagePolicy.
       */
      @java.lang.Override
      public int getStoragePolicy() {
        return storagePolicy_;
      }
      /**
       * <pre>
       * block storage policy id
       * </pre>
       *
       * <code>optional uint32 storagePolicy = 16 [default = 0];</code>
       * @param value The storagePolicy to set.
       * @return This builder for chaining.
       */
      public Builder setStoragePolicy(int value) {

        storagePolicy_ = value;
        bitField0_ |= 0x00008000;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * block storage policy id
       * </pre>
       *
       * <code>optional uint32 storagePolicy = 16 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearStoragePolicy() {
        bitField0_ = (bitField0_ & ~0x00008000);
        storagePolicy_ = 0;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> ecPolicyBuilder_;
      /**
       * <pre>
       * Optional field for erasure coding
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;</code>
       * @return Whether the ecPolicy field is set.
       */
      public boolean hasEcPolicy() {
        return ((bitField0_ & 0x00010000) != 0);
      }
      /**
       * <pre>
       * Optional field for erasure coding
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;</code>
       * @return The ecPolicy.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() {
        if (ecPolicyBuilder_ == null) {
          return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_;
        } else {
          return ecPolicyBuilder_.getMessage();
        }
      }
      /**
       * <pre>
       * Optional field for erasure coding
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;</code>
       */
      public Builder setEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
        if (ecPolicyBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ecPolicy_ = value;
        } else {
          ecPolicyBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00010000;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Optional field for erasure coding
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;</code>
       */
      public Builder setEcPolicy(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) {
        if (ecPolicyBuilder_ == null) {
          ecPolicy_ = builderForValue.build();
        } else {
          ecPolicyBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00010000;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Optional field for erasure coding
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;</code>
       */
      public Builder mergeEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
        if (ecPolicyBuilder_ == null) {
          if (((bitField0_ & 0x00010000) != 0) &&
            ecPolicy_ != null &&
            ecPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()) {
            getEcPolicyBuilder().mergeFrom(value);
          } else {
            ecPolicy_ = value;
          }
        } else {
          ecPolicyBuilder_.mergeFrom(value);
        }
        if (ecPolicy_ != null) {
          bitField0_ |= 0x00010000;
          onChanged();
        }
        return this;
      }
      /**
       * <pre>
       * Optional field for erasure coding
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;</code>
       */
      public Builder clearEcPolicy() {
        bitField0_ = (bitField0_ & ~0x00010000);
        ecPolicy_ = null;
        if (ecPolicyBuilder_ != null) {
          ecPolicyBuilder_.dispose();
          ecPolicyBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Optional field for erasure coding
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder getEcPolicyBuilder() {
        bitField0_ |= 0x00010000;
        onChanged();
        return getEcPolicyFieldBuilder().getBuilder();
      }
      /**
       * <pre>
       * Optional field for erasure coding
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() {
        if (ecPolicyBuilder_ != null) {
          return ecPolicyBuilder_.getMessageOrBuilder();
        } else {
          return ecPolicy_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_;
        }
      }
      /**
       * <pre>
       * Optional field for erasure coding
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 17;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> 
          getEcPolicyFieldBuilder() {
        if (ecPolicyBuilder_ == null) {
          ecPolicyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>(
                  getEcPolicy(),
                  getParentForChildren(),
                  isClean());
          ecPolicy_ = null;
        }
        return ecPolicyBuilder_;
      }

      private int flags_ ;
      /**
       * <pre>
       * Set of flags
       * </pre>
       *
       * <code>optional uint32 flags = 18 [default = 0];</code>
       * @return Whether the flags field is set.
       */
      @java.lang.Override
      public boolean hasFlags() {
        return ((bitField0_ & 0x00020000) != 0);
      }
      /**
       * <pre>
       * Set of flags
       * </pre>
       *
       * <code>optional uint32 flags = 18 [default = 0];</code>
       * @return The flags.
       */
      @java.lang.Override
      public int getFlags() {
        return flags_;
      }
      /**
       * <pre>
       * Set of flags
       * </pre>
       *
       * <code>optional uint32 flags = 18 [default = 0];</code>
       * @param value The flags to set.
       * @return This builder for chaining.
       */
      public Builder setFlags(int value) {

        flags_ = value;
        bitField0_ |= 0x00020000;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Set of flags
       * </pre>
       *
       * <code>optional uint32 flags = 18 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearFlags() {
        bitField0_ = (bitField0_ & ~0x00020000);
        flags_ = 0;
        onChanged();
        return this;
      }

      private java.lang.Object namespace_ = "";
      /**
       * <code>optional string namespace = 19;</code>
       * @return Whether the namespace field is set.
       */
      public boolean hasNamespace() {
        return ((bitField0_ & 0x00040000) != 0);
      }
      /**
       * <code>optional string namespace = 19;</code>
       * @return The namespace.
       */
      public java.lang.String getNamespace() {
        java.lang.Object ref = namespace_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            namespace_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string namespace = 19;</code>
       * @return The bytes for namespace.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getNamespaceBytes() {
        java.lang.Object ref = namespace_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          namespace_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
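      // Editorial note: string fields are stored as either a java.lang.String or
      // a ByteString; getNamespace() caches the decoded String only when the
      // bytes are valid UTF-8, and getNamespaceBytes() caches the UTF-8 encoded
      // ByteString, so repeated conversions in either direction are avoided.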
      /**
       * <code>optional string namespace = 19;</code>
       * @param value The namespace to set.
       * @return This builder for chaining.
       */
      public Builder setNamespace(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        namespace_ = value;
        bitField0_ |= 0x00040000;
        onChanged();
        return this;
      }
      /**
       * <code>optional string namespace = 19;</code>
       * @return This builder for chaining.
       */
      public Builder clearNamespace() {
        namespace_ = getDefaultInstance().getNamespace();
        bitField0_ = (bitField0_ & ~0x00040000);
        onChanged();
        return this;
      }
      /**
       * <code>optional string namespace = 19;</code>
       * @param value The bytes for namespace to set.
       * @return This builder for chaining.
       */
      public Builder setNamespaceBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        namespace_ = value;
        bitField0_ |= 0x00040000;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.HdfsFileStatusProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.HdfsFileStatusProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<HdfsFileStatusProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<HdfsFileStatusProto>() {
      @java.lang.Override
      public HdfsFileStatusProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<HdfsFileStatusProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<HdfsFileStatusProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
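
  // Editorial sketch (not generated code): an assumed, minimal use of the
  // HdfsFileStatusProto.Builder setters that appear above. The message also
  // declares required fields that are not visible in this excerpt, so the
  // sketch calls buildPartial() rather than build() to avoid an
  // UninitializedMessageException; the replication factor, block size, and
  // inode id are illustrative values only.
  private static HdfsFileStatusProto exampleHdfsFileStatus() {
    return HdfsFileStatusProto.newBuilder()
        .setBlockReplication(3)             // example replication factor
        .setBlocksize(128L * 1024 * 1024)   // example 128 MB block size
        .setFileId(16385L)                  // hypothetical inode id
        .setChildrenNum(-1)                 // -1 is the declared default
        .buildPartial();                    // skips required-field validation
  }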

  public interface BlockChecksumOptionsProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.BlockChecksumOptionsProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC];</code>
     * @return Whether the blockChecksumType field is set.
     */
    boolean hasBlockChecksumType();
    /**
     * <code>optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC];</code>
     * @return The blockChecksumType.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto getBlockChecksumType();

    /**
     * <pre>
     * Only used if blockChecksumType specifies a striped format, such as
     * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
     * to be the concatenation of N crcs, where
     * N == ((requestedLength - 1) / stripedLength) + 1
     * </pre>
     *
     * <code>optional uint64 stripeLength = 2;</code>
     * @return Whether the stripeLength field is set.
     */
    boolean hasStripeLength();
    /**
     * <pre>
     * Only used if blockChecksumType specifies a striped format, such as
     * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
     * to be the concatenation of N crcs, where
     * N == ((requestedLength - 1) / stripedLength) + 1
     * </pre>
     *
     * <code>optional uint64 stripeLength = 2;</code>
     * @return The stripeLength.
     */
    long getStripeLength();
  }
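
  // Editorial sketch (not generated code): a worked instance of the stripe count
  // formula quoted in the stripeLength field comment above,
  // N == ((requestedLength - 1) / stripedLength) + 1. With an assumed
  // requestedLength of 10 MB (10485760 bytes) and a 4 MB stripe (4194304 bytes),
  // the response would carry ((10485760 - 1) / 4194304) + 1 == 2 + 1 == 3
  // concatenated CRCs.
  private static long exampleStripeCrcCount(long requestedLength, long stripeLength) {
    // Integer (floor) division, matching the expression in the field comment.
    return ((requestedLength - 1) / stripeLength) + 1;
  }
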
  /**
   * <pre>
   **
   * Algorithms/types denoting how block-level checksums are computed using
   * lower-level chunk checksums/CRCs.
   * These options should be kept in sync with
   * org.apache.hadoop.hdfs.protocol.BlockChecksumOptions.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.BlockChecksumOptionsProto}
   */
  public static final class BlockChecksumOptionsProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.BlockChecksumOptionsProto)
      BlockChecksumOptionsProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use BlockChecksumOptionsProto.newBuilder() to construct.
    private BlockChecksumOptionsProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private BlockChecksumOptionsProto() {
      blockChecksumType_ = 1;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new BlockChecksumOptionsProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockChecksumOptionsProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockChecksumOptionsProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder.class);
    }

    private int bitField0_;
    public static final int BLOCKCHECKSUMTYPE_FIELD_NUMBER = 1;
    private int blockChecksumType_ = 1;
    /**
     * <code>optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC];</code>
     * @return Whether the blockChecksumType field is set.
     */
    @java.lang.Override public boolean hasBlockChecksumType() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC];</code>
     * @return The blockChecksumType.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto getBlockChecksumType() {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto.forNumber(blockChecksumType_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto.MD5CRC : result;
    }

    public static final int STRIPELENGTH_FIELD_NUMBER = 2;
    private long stripeLength_ = 0L;
    /**
     * <pre>
     * Only used if blockChecksumType specifies a striped format, such as
     * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
     * to be the concatenation of N crcs, where
     * N == ((requestedLength - 1) / stripedLength) + 1
     * </pre>
     *
     * <code>optional uint64 stripeLength = 2;</code>
     * @return Whether the stripeLength field is set.
     */
    @java.lang.Override
    public boolean hasStripeLength() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <pre>
     * Only used if blockChecksumType specifies a striped format, such as
     * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
     * to be the concatenation of N crcs, where
     * N == ((requestedLength - 1) / stripedLength) + 1
     * </pre>
     *
     * <code>optional uint64 stripeLength = 2;</code>
     * @return The stripeLength.
     */
    @java.lang.Override
    public long getStripeLength() {
      return stripeLength_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeEnum(1, blockChecksumType_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(2, stripeLength_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(1, blockChecksumType_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(2, stripeLength_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto) obj;

      if (hasBlockChecksumType() != other.hasBlockChecksumType()) return false;
      if (hasBlockChecksumType()) {
        if (blockChecksumType_ != other.blockChecksumType_) return false;
      }
      if (hasStripeLength() != other.hasStripeLength()) return false;
      if (hasStripeLength()) {
        if (getStripeLength()
            != other.getStripeLength()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasBlockChecksumType()) {
        hash = (37 * hash) + BLOCKCHECKSUMTYPE_FIELD_NUMBER;
        hash = (53 * hash) + blockChecksumType_;
      }
      if (hasStripeLength()) {
        hash = (37 * hash) + STRIPELENGTH_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getStripeLength());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Algorithms/types denoting how block-level checksums are computed using
     * lower-level chunk checksums/CRCs.
     * These options should be kept in sync with
     * org.apache.hadoop.hdfs.protocol.BlockChecksumOptions.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.BlockChecksumOptionsProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.BlockChecksumOptionsProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockChecksumOptionsProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockChecksumOptionsProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        blockChecksumType_ = 1;
        stripeLength_ = 0L;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockChecksumOptionsProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.blockChecksumType_ = blockChecksumType_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.stripeLength_ = stripeLength_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance()) return this;
        if (other.hasBlockChecksumType()) {
          setBlockChecksumType(other.getBlockChecksumType());
        }
        if (other.hasStripeLength()) {
          setStripeLength(other.getStripeLength());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
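        // Editorial note: a protobuf tag is (fieldNumber << 3) | wireType, so
        // tag 8 below is field 1 read as a varint (the blockChecksumType enum)
        // and tag 16 is field 2 read as a varint (stripeLength).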
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(1, tmpRaw);
                } else {
                  blockChecksumType_ = tmpRaw;
                  bitField0_ |= 0x00000001;
                }
                break;
              } // case 8
              case 16: {
                stripeLength_ = input.readUInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int blockChecksumType_ = 1;
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC];</code>
       * @return Whether the blockChecksumType field is set.
       */
      @java.lang.Override public boolean hasBlockChecksumType() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC];</code>
       * @return The blockChecksumType.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto getBlockChecksumType() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto.forNumber(blockChecksumType_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto.MD5CRC : result;
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC];</code>
       * @param value The blockChecksumType to set.
       * @return This builder for chaining.
       */
      public Builder setBlockChecksumType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumTypeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        blockChecksumType_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumTypeProto blockChecksumType = 1 [default = MD5CRC];</code>
       * @return This builder for chaining.
       */
      public Builder clearBlockChecksumType() {
        bitField0_ = (bitField0_ & ~0x00000001);
        blockChecksumType_ = 1;
        onChanged();
        return this;
      }

      private long stripeLength_ ;
      /**
       * <pre>
       * Only used if blockChecksumType specifies a striped format, such as
       * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
       * to be the concatenation of N crcs, where
       * N == ((requestedLength - 1) / stripedLength) + 1
       * </pre>
       *
       * <code>optional uint64 stripeLength = 2;</code>
       * @return Whether the stripeLength field is set.
       */
      @java.lang.Override
      public boolean hasStripeLength() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       * Only used if blockChecksumType specifies a striped format, such as
       * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
       * to be the concatenation of N crcs, where
       * N == ((requestedLength - 1) / stripedLength) + 1
       * </pre>
       *
       * <code>optional uint64 stripeLength = 2;</code>
       * @return The stripeLength.
       */
      @java.lang.Override
      public long getStripeLength() {
        return stripeLength_;
      }
      /**
       * <pre>
       * Only used if blockChecksumType specifies a striped format, such as
       * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
       * to be the concatenation of N crcs, where
       * N == ((requestedLength - 1) / stripedLength) + 1
       * </pre>
       *
       * <code>optional uint64 stripeLength = 2;</code>
       * @param value The stripeLength to set.
       * @return This builder for chaining.
       */
      public Builder setStripeLength(long value) {

        stripeLength_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Only used if blockChecksumType specifies a striped format, such as
       * COMPOSITE_CRC. If so, then the blockChecksum in the response is expected
       * to be the concatenation of N crcs, where
       * N == ((requestedLength - 1) / stripedLength) + 1
       * </pre>
       *
       * <code>optional uint64 stripeLength = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearStripeLength() {
        bitField0_ = (bitField0_ & ~0x00000002);
        stripeLength_ = 0L;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockChecksumOptionsProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockChecksumOptionsProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<BlockChecksumOptionsProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<BlockChecksumOptionsProto>() {
      @java.lang.Override
      public BlockChecksumOptionsProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<BlockChecksumOptionsProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<BlockChecksumOptionsProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

  public interface FsServerDefaultsProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.FsServerDefaultsProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required uint64 blockSize = 1;</code>
     * @return Whether the blockSize field is set.
     */
    boolean hasBlockSize();
    /**
     * <code>required uint64 blockSize = 1;</code>
     * @return The blockSize.
     */
    long getBlockSize();

    /**
     * <code>required uint32 bytesPerChecksum = 2;</code>
     * @return Whether the bytesPerChecksum field is set.
     */
    boolean hasBytesPerChecksum();
    /**
     * <code>required uint32 bytesPerChecksum = 2;</code>
     * @return The bytesPerChecksum.
     */
    int getBytesPerChecksum();

    /**
     * <code>required uint32 writePacketSize = 3;</code>
     * @return Whether the writePacketSize field is set.
     */
    boolean hasWritePacketSize();
    /**
     * <code>required uint32 writePacketSize = 3;</code>
     * @return The writePacketSize.
     */
    int getWritePacketSize();

    /**
     * <pre>
     * Actually a short - only 16 bits used
     * </pre>
     *
     * <code>required uint32 replication = 4;</code>
     * @return Whether the replication field is set.
     */
    boolean hasReplication();
    /**
     * <pre>
     * Actually a short - only 16 bits used
     * </pre>
     *
     * <code>required uint32 replication = 4;</code>
     * @return The replication.
     */
    int getReplication();

    /**
     * <code>required uint32 fileBufferSize = 5;</code>
     * @return Whether the fileBufferSize field is set.
     */
    boolean hasFileBufferSize();
    /**
     * <code>required uint32 fileBufferSize = 5;</code>
     * @return The fileBufferSize.
     */
    int getFileBufferSize();

    /**
     * <code>optional bool encryptDataTransfer = 6 [default = false];</code>
     * @return Whether the encryptDataTransfer field is set.
     */
    boolean hasEncryptDataTransfer();
    /**
     * <code>optional bool encryptDataTransfer = 6 [default = false];</code>
     * @return The encryptDataTransfer.
     */
    boolean getEncryptDataTransfer();

    /**
     * <code>optional uint64 trashInterval = 7 [default = 0];</code>
     * @return Whether the trashInterval field is set.
     */
    boolean hasTrashInterval();
    /**
     * <code>optional uint64 trashInterval = 7 [default = 0];</code>
     * @return The trashInterval.
     */
    long getTrashInterval();

    /**
     * <code>optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];</code>
     * @return Whether the checksumType field is set.
     */
    boolean hasChecksumType();
    /**
     * <code>optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];</code>
     * @return The checksumType.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getChecksumType();

    /**
     * <code>optional string keyProviderUri = 9;</code>
     * @return Whether the keyProviderUri field is set.
     */
    boolean hasKeyProviderUri();
    /**
     * <code>optional string keyProviderUri = 9;</code>
     * @return The keyProviderUri.
     */
    java.lang.String getKeyProviderUri();
    /**
     * <code>optional string keyProviderUri = 9;</code>
     * @return The bytes for keyProviderUri.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getKeyProviderUriBytes();

    /**
     * <code>optional uint32 policyId = 10 [default = 0];</code>
     * @return Whether the policyId field is set.
     */
    boolean hasPolicyId();
    /**
     * <code>optional uint32 policyId = 10 [default = 0];</code>
     * @return The policyId.
     */
    int getPolicyId();

    /**
     * <code>optional bool snapshotTrashRootEnabled = 11 [default = false];</code>
     * @return Whether the snapshotTrashRootEnabled field is set.
     */
    boolean hasSnapshotTrashRootEnabled();
    /**
     * <code>optional bool snapshotTrashRootEnabled = 11 [default = false];</code>
     * @return The snapshotTrashRootEnabled.
     */
    boolean getSnapshotTrashRootEnabled();
  }
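
  // Usage sketch (hand-written annotation, not generated by protoc): one way a
  // caller might populate FsServerDefaultsProto. The five required fields must
  // be set or build() fails; every value below is illustrative only.
  //
  //   HdfsProtos.FsServerDefaultsProto defaults =
  //       HdfsProtos.FsServerDefaultsProto.newBuilder()
  //           .setBlockSize(128L * 1024 * 1024)   // required uint64
  //           .setBytesPerChecksum(512)           // required uint32
  //           .setWritePacketSize(64 * 1024)      // required uint32
  //           .setReplication(3)                  // required, fits in 16 bits
  //           .setFileBufferSize(4096)            // required uint32
  //           .setEncryptDataTransfer(false)      // optional, default false
  //           .build();                           // throws if a required field is unset
  //   byte[] wire = defaults.toByteArray();
  //   HdfsProtos.FsServerDefaultsProto parsed =
  //       HdfsProtos.FsServerDefaultsProto.parseFrom(wire);
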
  /**
   * <pre>
   **
   * HDFS Server Defaults
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.FsServerDefaultsProto}
   */
  public static final class FsServerDefaultsProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.FsServerDefaultsProto)
      FsServerDefaultsProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use FsServerDefaultsProto.newBuilder() to construct.
    private FsServerDefaultsProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private FsServerDefaultsProto() {
      checksumType_ = 1;
      keyProviderUri_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new FsServerDefaultsProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder.class);
    }

    private int bitField0_;
    public static final int BLOCKSIZE_FIELD_NUMBER = 1;
    private long blockSize_ = 0L;
    /**
     * <code>required uint64 blockSize = 1;</code>
     * @return Whether the blockSize field is set.
     */
    @java.lang.Override
    public boolean hasBlockSize() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required uint64 blockSize = 1;</code>
     * @return The blockSize.
     */
    @java.lang.Override
    public long getBlockSize() {
      return blockSize_;
    }

    public static final int BYTESPERCHECKSUM_FIELD_NUMBER = 2;
    private int bytesPerChecksum_ = 0;
    /**
     * <code>required uint32 bytesPerChecksum = 2;</code>
     * @return Whether the bytesPerChecksum field is set.
     */
    @java.lang.Override
    public boolean hasBytesPerChecksum() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required uint32 bytesPerChecksum = 2;</code>
     * @return The bytesPerChecksum.
     */
    @java.lang.Override
    public int getBytesPerChecksum() {
      return bytesPerChecksum_;
    }

    public static final int WRITEPACKETSIZE_FIELD_NUMBER = 3;
    private int writePacketSize_ = 0;
    /**
     * <code>required uint32 writePacketSize = 3;</code>
     * @return Whether the writePacketSize field is set.
     */
    @java.lang.Override
    public boolean hasWritePacketSize() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required uint32 writePacketSize = 3;</code>
     * @return The writePacketSize.
     */
    @java.lang.Override
    public int getWritePacketSize() {
      return writePacketSize_;
    }

    public static final int REPLICATION_FIELD_NUMBER = 4;
    private int replication_ = 0;
    /**
     * <pre>
     * Actually a short - only 16 bits used
     * </pre>
     *
     * <code>required uint32 replication = 4;</code>
     * @return Whether the replication field is set.
     */
    @java.lang.Override
    public boolean hasReplication() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <pre>
     * Actually a short - only 16 bits used
     * </pre>
     *
     * <code>required uint32 replication = 4;</code>
     * @return The replication.
     */
    @java.lang.Override
    public int getReplication() {
      return replication_;
    }

    public static final int FILEBUFFERSIZE_FIELD_NUMBER = 5;
    private int fileBufferSize_ = 0;
    /**
     * <code>required uint32 fileBufferSize = 5;</code>
     * @return Whether the fileBufferSize field is set.
     */
    @java.lang.Override
    public boolean hasFileBufferSize() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>required uint32 fileBufferSize = 5;</code>
     * @return The fileBufferSize.
     */
    @java.lang.Override
    public int getFileBufferSize() {
      return fileBufferSize_;
    }

    public static final int ENCRYPTDATATRANSFER_FIELD_NUMBER = 6;
    private boolean encryptDataTransfer_ = false;
    /**
     * <code>optional bool encryptDataTransfer = 6 [default = false];</code>
     * @return Whether the encryptDataTransfer field is set.
     */
    @java.lang.Override
    public boolean hasEncryptDataTransfer() {
      return ((bitField0_ & 0x00000020) != 0);
    }
    /**
     * <code>optional bool encryptDataTransfer = 6 [default = false];</code>
     * @return The encryptDataTransfer.
     */
    @java.lang.Override
    public boolean getEncryptDataTransfer() {
      return encryptDataTransfer_;
    }

    public static final int TRASHINTERVAL_FIELD_NUMBER = 7;
    private long trashInterval_ = 0L;
    /**
     * <code>optional uint64 trashInterval = 7 [default = 0];</code>
     * @return Whether the trashInterval field is set.
     */
    @java.lang.Override
    public boolean hasTrashInterval() {
      return ((bitField0_ & 0x00000040) != 0);
    }
    /**
     * <code>optional uint64 trashInterval = 7 [default = 0];</code>
     * @return The trashInterval.
     */
    @java.lang.Override
    public long getTrashInterval() {
      return trashInterval_;
    }

    public static final int CHECKSUMTYPE_FIELD_NUMBER = 8;
    private int checksumType_ = 1;
    /**
     * <code>optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];</code>
     * @return Whether the checksumType field is set.
     */
    @java.lang.Override public boolean hasChecksumType() {
      return ((bitField0_ & 0x00000080) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];</code>
     * @return The checksumType.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getChecksumType() {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.forNumber(checksumType_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32 : result;
    }

    public static final int KEYPROVIDERURI_FIELD_NUMBER = 9;
    @SuppressWarnings("serial")
    private volatile java.lang.Object keyProviderUri_ = "";
    /**
     * <code>optional string keyProviderUri = 9;</code>
     * @return Whether the keyProviderUri field is set.
     */
    @java.lang.Override
    public boolean hasKeyProviderUri() {
      return ((bitField0_ & 0x00000100) != 0);
    }
    /**
     * <code>optional string keyProviderUri = 9;</code>
     * @return The keyProviderUri.
     */
    @java.lang.Override
    public java.lang.String getKeyProviderUri() {
      java.lang.Object ref = keyProviderUri_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          keyProviderUri_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string keyProviderUri = 9;</code>
     * @return The bytes for keyProviderUri.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getKeyProviderUriBytes() {
      java.lang.Object ref = keyProviderUri_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        keyProviderUri_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }
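
    // Hand-written note (not generated): keyProviderUri_ is stored either as a
    // String or a ByteString. The two accessors above lazily convert between
    // the representations and cache the result (the String form only when the
    // bytes are valid UTF-8), so repeated calls avoid re-decoding.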

    public static final int POLICYID_FIELD_NUMBER = 10;
    private int policyId_ = 0;
    /**
     * <code>optional uint32 policyId = 10 [default = 0];</code>
     * @return Whether the policyId field is set.
     */
    @java.lang.Override
    public boolean hasPolicyId() {
      return ((bitField0_ & 0x00000200) != 0);
    }
    /**
     * <code>optional uint32 policyId = 10 [default = 0];</code>
     * @return The policyId.
     */
    @java.lang.Override
    public int getPolicyId() {
      return policyId_;
    }

    public static final int SNAPSHOTTRASHROOTENABLED_FIELD_NUMBER = 11;
    private boolean snapshotTrashRootEnabled_ = false;
    /**
     * <code>optional bool snapshotTrashRootEnabled = 11 [default = false];</code>
     * @return Whether the snapshotTrashRootEnabled field is set.
     */
    @java.lang.Override
    public boolean hasSnapshotTrashRootEnabled() {
      return ((bitField0_ & 0x00000400) != 0);
    }
    /**
     * <code>optional bool snapshotTrashRootEnabled = 11 [default = false];</code>
     * @return The snapshotTrashRootEnabled.
     */
    @java.lang.Override
    public boolean getSnapshotTrashRootEnabled() {
      return snapshotTrashRootEnabled_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasBlockSize()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasBytesPerChecksum()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasWritePacketSize()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasReplication()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasFileBufferSize()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
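    // Hand-written note (not generated): memoizedIsInitialized caches the
    // required-field check (-1 = not yet computed, 0 = a required field is
    // missing, 1 = blockSize, bytesPerChecksum, writePacketSize, replication
    // and fileBufferSize are all present).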

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt64(1, blockSize_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt32(2, bytesPerChecksum_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeUInt32(3, writePacketSize_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeUInt32(4, replication_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeUInt32(5, fileBufferSize_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        output.writeBool(6, encryptDataTransfer_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        output.writeUInt64(7, trashInterval_);
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        output.writeEnum(8, checksumType_);
      }
      if (((bitField0_ & 0x00000100) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 9, keyProviderUri_);
      }
      if (((bitField0_ & 0x00000200) != 0)) {
        output.writeUInt32(10, policyId_);
      }
      if (((bitField0_ & 0x00000400) != 0)) {
        output.writeBool(11, snapshotTrashRootEnabled_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(1, blockSize_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(2, bytesPerChecksum_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(3, writePacketSize_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(4, replication_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(5, fileBufferSize_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(6, encryptDataTransfer_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(7, trashInterval_);
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(8, checksumType_);
      }
      if (((bitField0_ & 0x00000100) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(9, keyProviderUri_);
      }
      if (((bitField0_ & 0x00000200) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(10, policyId_);
      }
      if (((bitField0_ & 0x00000400) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(11, snapshotTrashRootEnabled_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto) obj;

      if (hasBlockSize() != other.hasBlockSize()) return false;
      if (hasBlockSize()) {
        if (getBlockSize()
            != other.getBlockSize()) return false;
      }
      if (hasBytesPerChecksum() != other.hasBytesPerChecksum()) return false;
      if (hasBytesPerChecksum()) {
        if (getBytesPerChecksum()
            != other.getBytesPerChecksum()) return false;
      }
      if (hasWritePacketSize() != other.hasWritePacketSize()) return false;
      if (hasWritePacketSize()) {
        if (getWritePacketSize()
            != other.getWritePacketSize()) return false;
      }
      if (hasReplication() != other.hasReplication()) return false;
      if (hasReplication()) {
        if (getReplication()
            != other.getReplication()) return false;
      }
      if (hasFileBufferSize() != other.hasFileBufferSize()) return false;
      if (hasFileBufferSize()) {
        if (getFileBufferSize()
            != other.getFileBufferSize()) return false;
      }
      if (hasEncryptDataTransfer() != other.hasEncryptDataTransfer()) return false;
      if (hasEncryptDataTransfer()) {
        if (getEncryptDataTransfer()
            != other.getEncryptDataTransfer()) return false;
      }
      if (hasTrashInterval() != other.hasTrashInterval()) return false;
      if (hasTrashInterval()) {
        if (getTrashInterval()
            != other.getTrashInterval()) return false;
      }
      if (hasChecksumType() != other.hasChecksumType()) return false;
      if (hasChecksumType()) {
        if (checksumType_ != other.checksumType_) return false;
      }
      if (hasKeyProviderUri() != other.hasKeyProviderUri()) return false;
      if (hasKeyProviderUri()) {
        if (!getKeyProviderUri()
            .equals(other.getKeyProviderUri())) return false;
      }
      if (hasPolicyId() != other.hasPolicyId()) return false;
      if (hasPolicyId()) {
        if (getPolicyId()
            != other.getPolicyId()) return false;
      }
      if (hasSnapshotTrashRootEnabled() != other.hasSnapshotTrashRootEnabled()) return false;
      if (hasSnapshotTrashRootEnabled()) {
        if (getSnapshotTrashRootEnabled()
            != other.getSnapshotTrashRootEnabled()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasBlockSize()) {
        hash = (37 * hash) + BLOCKSIZE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getBlockSize());
      }
      if (hasBytesPerChecksum()) {
        hash = (37 * hash) + BYTESPERCHECKSUM_FIELD_NUMBER;
        hash = (53 * hash) + getBytesPerChecksum();
      }
      if (hasWritePacketSize()) {
        hash = (37 * hash) + WRITEPACKETSIZE_FIELD_NUMBER;
        hash = (53 * hash) + getWritePacketSize();
      }
      if (hasReplication()) {
        hash = (37 * hash) + REPLICATION_FIELD_NUMBER;
        hash = (53 * hash) + getReplication();
      }
      if (hasFileBufferSize()) {
        hash = (37 * hash) + FILEBUFFERSIZE_FIELD_NUMBER;
        hash = (53 * hash) + getFileBufferSize();
      }
      if (hasEncryptDataTransfer()) {
        hash = (37 * hash) + ENCRYPTDATATRANSFER_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getEncryptDataTransfer());
      }
      if (hasTrashInterval()) {
        hash = (37 * hash) + TRASHINTERVAL_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getTrashInterval());
      }
      if (hasChecksumType()) {
        hash = (37 * hash) + CHECKSUMTYPE_FIELD_NUMBER;
        hash = (53 * hash) + checksumType_;
      }
      if (hasKeyProviderUri()) {
        hash = (37 * hash) + KEYPROVIDERURI_FIELD_NUMBER;
        hash = (53 * hash) + getKeyProviderUri().hashCode();
      }
      if (hasPolicyId()) {
        hash = (37 * hash) + POLICYID_FIELD_NUMBER;
        hash = (53 * hash) + getPolicyId();
      }
      if (hasSnapshotTrashRootEnabled()) {
        hash = (37 * hash) + SNAPSHOTTRASHROOTENABLED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getSnapshotTrashRootEnabled());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }
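
    // Hand-written note (not generated): toBuilder() and newBuilder(prototype)
    // copy an existing message so individual fields can be overridden, e.g.
    // (illustrative only):
    //
    //   HdfsProtos.FsServerDefaultsProto updated =
    //       existing.toBuilder().setReplication(2).build();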

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * HDFS Server Defaults
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.FsServerDefaultsProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.FsServerDefaultsProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        blockSize_ = 0L;
        bytesPerChecksum_ = 0;
        writePacketSize_ = 0;
        replication_ = 0;
        fileBufferSize_ = 0;
        encryptDataTransfer_ = false;
        trashInterval_ = 0L;
        checksumType_ = 1;
        keyProviderUri_ = "";
        policyId_ = 0;
        snapshotTrashRootEnabled_ = false;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.blockSize_ = blockSize_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.bytesPerChecksum_ = bytesPerChecksum_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.writePacketSize_ = writePacketSize_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.replication_ = replication_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.fileBufferSize_ = fileBufferSize_;
          to_bitField0_ |= 0x00000010;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.encryptDataTransfer_ = encryptDataTransfer_;
          to_bitField0_ |= 0x00000020;
        }
        if (((from_bitField0_ & 0x00000040) != 0)) {
          result.trashInterval_ = trashInterval_;
          to_bitField0_ |= 0x00000040;
        }
        if (((from_bitField0_ & 0x00000080) != 0)) {
          result.checksumType_ = checksumType_;
          to_bitField0_ |= 0x00000080;
        }
        if (((from_bitField0_ & 0x00000100) != 0)) {
          result.keyProviderUri_ = keyProviderUri_;
          to_bitField0_ |= 0x00000100;
        }
        if (((from_bitField0_ & 0x00000200) != 0)) {
          result.policyId_ = policyId_;
          to_bitField0_ |= 0x00000200;
        }
        if (((from_bitField0_ & 0x00000400) != 0)) {
          result.snapshotTrashRootEnabled_ = snapshotTrashRootEnabled_;
          to_bitField0_ |= 0x00000400;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto.getDefaultInstance()) return this;
        if (other.hasBlockSize()) {
          setBlockSize(other.getBlockSize());
        }
        if (other.hasBytesPerChecksum()) {
          setBytesPerChecksum(other.getBytesPerChecksum());
        }
        if (other.hasWritePacketSize()) {
          setWritePacketSize(other.getWritePacketSize());
        }
        if (other.hasReplication()) {
          setReplication(other.getReplication());
        }
        if (other.hasFileBufferSize()) {
          setFileBufferSize(other.getFileBufferSize());
        }
        if (other.hasEncryptDataTransfer()) {
          setEncryptDataTransfer(other.getEncryptDataTransfer());
        }
        if (other.hasTrashInterval()) {
          setTrashInterval(other.getTrashInterval());
        }
        if (other.hasChecksumType()) {
          setChecksumType(other.getChecksumType());
        }
        if (other.hasKeyProviderUri()) {
          keyProviderUri_ = other.keyProviderUri_;
          bitField0_ |= 0x00000100;
          onChanged();
        }
        if (other.hasPolicyId()) {
          setPolicyId(other.getPolicyId());
        }
        if (other.hasSnapshotTrashRootEnabled()) {
          setSnapshotTrashRootEnabled(other.getSnapshotTrashRootEnabled());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasBlockSize()) {
          return false;
        }
        if (!hasBytesPerChecksum()) {
          return false;
        }
        if (!hasWritePacketSize()) {
          return false;
        }
        if (!hasReplication()) {
          return false;
        }
        if (!hasFileBufferSize()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                blockSize_ = input.readUInt64();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 16: {
                bytesPerChecksum_ = input.readUInt32();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                writePacketSize_ = input.readUInt32();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 32: {
                replication_ = input.readUInt32();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
              case 40: {
                fileBufferSize_ = input.readUInt32();
                bitField0_ |= 0x00000010;
                break;
              } // case 40
              case 48: {
                encryptDataTransfer_ = input.readBool();
                bitField0_ |= 0x00000020;
                break;
              } // case 48
              case 56: {
                trashInterval_ = input.readUInt64();
                bitField0_ |= 0x00000040;
                break;
              } // case 56
              case 64: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(8, tmpRaw);
                } else {
                  checksumType_ = tmpRaw;
                  bitField0_ |= 0x00000080;
                }
                break;
              } // case 64
              case 74: {
                keyProviderUri_ = input.readBytes();
                bitField0_ |= 0x00000100;
                break;
              } // case 74
              case 80: {
                policyId_ = input.readUInt32();
                bitField0_ |= 0x00000200;
                break;
              } // case 80
              case 88: {
                snapshotTrashRootEnabled_ = input.readBool();
                bitField0_ |= 0x00000400;
                break;
              } // case 88
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
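      // Hand-written note (not generated): the case labels above are protobuf
      // wire tags, computed as (fieldNumber << 3) | wireType. For example,
      // blockSize is field 1 with varint wire type 0, so its tag is
      // (1 << 3) | 0 == 8, while keyProviderUri is field 9 with
      // length-delimited wire type 2, giving (9 << 3) | 2 == 74.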
      private int bitField0_;

      private long blockSize_ ;
      /**
       * <code>required uint64 blockSize = 1;</code>
       * @return Whether the blockSize field is set.
       */
      @java.lang.Override
      public boolean hasBlockSize() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required uint64 blockSize = 1;</code>
       * @return The blockSize.
       */
      @java.lang.Override
      public long getBlockSize() {
        return blockSize_;
      }
      /**
       * <code>required uint64 blockSize = 1;</code>
       * @param value The blockSize to set.
       * @return This builder for chaining.
       */
      public Builder setBlockSize(long value) {

        blockSize_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 blockSize = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearBlockSize() {
        bitField0_ = (bitField0_ & ~0x00000001);
        blockSize_ = 0L;
        onChanged();
        return this;
      }

      private int bytesPerChecksum_ ;
      /**
       * <code>required uint32 bytesPerChecksum = 2;</code>
       * @return Whether the bytesPerChecksum field is set.
       */
      @java.lang.Override
      public boolean hasBytesPerChecksum() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required uint32 bytesPerChecksum = 2;</code>
       * @return The bytesPerChecksum.
       */
      @java.lang.Override
      public int getBytesPerChecksum() {
        return bytesPerChecksum_;
      }
      /**
       * <code>required uint32 bytesPerChecksum = 2;</code>
       * @param value The bytesPerChecksum to set.
       * @return This builder for chaining.
       */
      public Builder setBytesPerChecksum(int value) {

        bytesPerChecksum_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 bytesPerChecksum = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearBytesPerChecksum() {
        bitField0_ = (bitField0_ & ~0x00000002);
        bytesPerChecksum_ = 0;
        onChanged();
        return this;
      }

      private int writePacketSize_ ;
      /**
       * <code>required uint32 writePacketSize = 3;</code>
       * @return Whether the writePacketSize field is set.
       */
      @java.lang.Override
      public boolean hasWritePacketSize() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required uint32 writePacketSize = 3;</code>
       * @return The writePacketSize.
       */
      @java.lang.Override
      public int getWritePacketSize() {
        return writePacketSize_;
      }
      /**
       * <code>required uint32 writePacketSize = 3;</code>
       * @param value The writePacketSize to set.
       * @return This builder for chaining.
       */
      public Builder setWritePacketSize(int value) {

        writePacketSize_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 writePacketSize = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearWritePacketSize() {
        bitField0_ = (bitField0_ & ~0x00000004);
        writePacketSize_ = 0;
        onChanged();
        return this;
      }

      private int replication_ ;
      /**
       * <pre>
       * Actually a short - only 16 bits used
       * </pre>
       *
       * <code>required uint32 replication = 4;</code>
       * @return Whether the replication field is set.
       */
      @java.lang.Override
      public boolean hasReplication() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <pre>
       * Actually a short - only 16 bits used
       * </pre>
       *
       * <code>required uint32 replication = 4;</code>
       * @return The replication.
       */
      @java.lang.Override
      public int getReplication() {
        return replication_;
      }
      /**
       * <pre>
       * Actually a short - only 16 bits used
       * </pre>
       *
       * <code>required uint32 replication = 4;</code>
       * @param value The replication to set.
       * @return This builder for chaining.
       */
      public Builder setReplication(int value) {

        replication_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Actually a short - only 16 bits used
       * </pre>
       *
       * <code>required uint32 replication = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearReplication() {
        bitField0_ = (bitField0_ & ~0x00000008);
        replication_ = 0;
        onChanged();
        return this;
      }

      private int fileBufferSize_ ;
      /**
       * <code>required uint32 fileBufferSize = 5;</code>
       * @return Whether the fileBufferSize field is set.
       */
      @java.lang.Override
      public boolean hasFileBufferSize() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>required uint32 fileBufferSize = 5;</code>
       * @return The fileBufferSize.
       */
      @java.lang.Override
      public int getFileBufferSize() {
        return fileBufferSize_;
      }
      /**
       * <code>required uint32 fileBufferSize = 5;</code>
       * @param value The fileBufferSize to set.
       * @return This builder for chaining.
       */
      public Builder setFileBufferSize(int value) {

        fileBufferSize_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 fileBufferSize = 5;</code>
       * @return This builder for chaining.
       */
      public Builder clearFileBufferSize() {
        bitField0_ = (bitField0_ & ~0x00000010);
        fileBufferSize_ = 0;
        onChanged();
        return this;
      }

      private boolean encryptDataTransfer_ ;
      /**
       * <code>optional bool encryptDataTransfer = 6 [default = false];</code>
       * @return Whether the encryptDataTransfer field is set.
       */
      @java.lang.Override
      public boolean hasEncryptDataTransfer() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>optional bool encryptDataTransfer = 6 [default = false];</code>
       * @return The encryptDataTransfer.
       */
      @java.lang.Override
      public boolean getEncryptDataTransfer() {
        return encryptDataTransfer_;
      }
      /**
       * <code>optional bool encryptDataTransfer = 6 [default = false];</code>
       * @param value The encryptDataTransfer to set.
       * @return This builder for chaining.
       */
      public Builder setEncryptDataTransfer(boolean value) {

        encryptDataTransfer_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <code>optional bool encryptDataTransfer = 6 [default = false];</code>
       * @return This builder for chaining.
       */
      public Builder clearEncryptDataTransfer() {
        bitField0_ = (bitField0_ & ~0x00000020);
        encryptDataTransfer_ = false;
        onChanged();
        return this;
      }

      private long trashInterval_ ;
      /**
       * <code>optional uint64 trashInterval = 7 [default = 0];</code>
       * @return Whether the trashInterval field is set.
       */
      @java.lang.Override
      public boolean hasTrashInterval() {
        return ((bitField0_ & 0x00000040) != 0);
      }
      /**
       * <code>optional uint64 trashInterval = 7 [default = 0];</code>
       * @return The trashInterval.
       */
      @java.lang.Override
      public long getTrashInterval() {
        return trashInterval_;
      }
      /**
       * <code>optional uint64 trashInterval = 7 [default = 0];</code>
       * @param value The trashInterval to set.
       * @return This builder for chaining.
       */
      public Builder setTrashInterval(long value) {

        trashInterval_ = value;
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 trashInterval = 7 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearTrashInterval() {
        bitField0_ = (bitField0_ & ~0x00000040);
        trashInterval_ = 0L;
        onChanged();
        return this;
      }

      private int checksumType_ = 1;
      /**
       * <code>optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];</code>
       * @return Whether the checksumType field is set.
       */
      @java.lang.Override public boolean hasChecksumType() {
        return ((bitField0_ & 0x00000080) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];</code>
       * @return The checksumType.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getChecksumType() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.forNumber(checksumType_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32 : result;
      }
      /**
       * <code>optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];</code>
       * @param value The checksumType to set.
       * @return This builder for chaining.
       */
      public Builder setChecksumType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000080;
        checksumType_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];</code>
       * @return This builder for chaining.
       */
      public Builder clearChecksumType() {
        bitField0_ = (bitField0_ & ~0x00000080);
        checksumType_ = 1;
        onChanged();
        return this;
      }

      private java.lang.Object keyProviderUri_ = "";
      /**
       * <code>optional string keyProviderUri = 9;</code>
       * @return Whether the keyProviderUri field is set.
       */
      public boolean hasKeyProviderUri() {
        return ((bitField0_ & 0x00000100) != 0);
      }
      /**
       * <code>optional string keyProviderUri = 9;</code>
       * @return The keyProviderUri.
       */
      public java.lang.String getKeyProviderUri() {
        java.lang.Object ref = keyProviderUri_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            keyProviderUri_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string keyProviderUri = 9;</code>
       * @return The bytes for keyProviderUri.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getKeyProviderUriBytes() {
        java.lang.Object ref = keyProviderUri_;
        if (ref instanceof java.lang.String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          keyProviderUri_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string keyProviderUri = 9;</code>
       * @param value The keyProviderUri to set.
       * @return This builder for chaining.
       */
      public Builder setKeyProviderUri(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        keyProviderUri_ = value;
        bitField0_ |= 0x00000100;
        onChanged();
        return this;
      }
      /**
       * <code>optional string keyProviderUri = 9;</code>
       * @return This builder for chaining.
       */
      public Builder clearKeyProviderUri() {
        keyProviderUri_ = getDefaultInstance().getKeyProviderUri();
        bitField0_ = (bitField0_ & ~0x00000100);
        onChanged();
        return this;
      }
      /**
       * <code>optional string keyProviderUri = 9;</code>
       * @param value The bytes for keyProviderUri to set.
       * @return This builder for chaining.
       */
      public Builder setKeyProviderUriBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        keyProviderUri_ = value;
        bitField0_ |= 0x00000100;
        onChanged();
        return this;
      }

      private int policyId_ ;
      /**
       * <code>optional uint32 policyId = 10 [default = 0];</code>
       * @return Whether the policyId field is set.
       */
      @java.lang.Override
      public boolean hasPolicyId() {
        return ((bitField0_ & 0x00000200) != 0);
      }
      /**
       * <code>optional uint32 policyId = 10 [default = 0];</code>
       * @return The policyId.
       */
      @java.lang.Override
      public int getPolicyId() {
        return policyId_;
      }
      /**
       * <code>optional uint32 policyId = 10 [default = 0];</code>
       * @param value The policyId to set.
       * @return This builder for chaining.
       */
      public Builder setPolicyId(int value) {

        policyId_ = value;
        bitField0_ |= 0x00000200;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint32 policyId = 10 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearPolicyId() {
        bitField0_ = (bitField0_ & ~0x00000200);
        policyId_ = 0;
        onChanged();
        return this;
      }

      private boolean snapshotTrashRootEnabled_ ;
      /**
       * <code>optional bool snapshotTrashRootEnabled = 11 [default = false];</code>
       * @return Whether the snapshotTrashRootEnabled field is set.
       */
      @java.lang.Override
      public boolean hasSnapshotTrashRootEnabled() {
        return ((bitField0_ & 0x00000400) != 0);
      }
      /**
       * <code>optional bool snapshotTrashRootEnabled = 11 [default = false];</code>
       * @return The snapshotTrashRootEnabled.
       */
      @java.lang.Override
      public boolean getSnapshotTrashRootEnabled() {
        return snapshotTrashRootEnabled_;
      }
      /**
       * <code>optional bool snapshotTrashRootEnabled = 11 [default = false];</code>
       * @param value The snapshotTrashRootEnabled to set.
       * @return This builder for chaining.
       */
      public Builder setSnapshotTrashRootEnabled(boolean value) {

        snapshotTrashRootEnabled_ = value;
        bitField0_ |= 0x00000400;
        onChanged();
        return this;
      }
      /**
       * <code>optional bool snapshotTrashRootEnabled = 11 [default = false];</code>
       * @return This builder for chaining.
       */
      public Builder clearSnapshotTrashRootEnabled() {
        bitField0_ = (bitField0_ & ~0x00000400);
        snapshotTrashRootEnabled_ = false;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FsServerDefaultsProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.FsServerDefaultsProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<FsServerDefaultsProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<FsServerDefaultsProto>() {
      @java.lang.Override
      public FsServerDefaultsProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<FsServerDefaultsProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<FsServerDefaultsProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
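  // Illustrative usage sketch (not part of the generated output): building an
  // FsServerDefaultsProto with the optional fields whose builder methods appear
  // above. buildPartial() is used here so the sketch does not depend on the
  // message's other required fields; a real caller would set those and call build().
  //
  //   FsServerDefaultsProto defaults = FsServerDefaultsProto.newBuilder()
  //       .setPolicyId(0)                        // optional uint32 field 10, default 0
  //       .setSnapshotTrashRootEnabled(false)    // optional bool field 11, default false
  //       .buildPartial();
  //   boolean trashEnabled = defaults.getSnapshotTrashRootEnabled();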

  public interface DirectoryListingProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DirectoryListingProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> 
        getPartialListingList();
    /**
     * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index);
    /**
     * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
     */
    int getPartialListingCount();
    /**
     * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> 
        getPartialListingOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder(
        int index);

    /**
     * <code>required uint32 remainingEntries = 2;</code>
     * @return Whether the remainingEntries field is set.
     */
    boolean hasRemainingEntries();
    /**
     * <code>required uint32 remainingEntries = 2;</code>
     * @return The remainingEntries.
     */
    int getRemainingEntries();
  }
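  // Illustrative sketch (not generated): iterating a listing through the
  // read-only interface declared above. "listing" is a hypothetical variable of
  // any type implementing DirectoryListingProtoOrBuilder.
  //
  //   for (int i = 0; i < listing.getPartialListingCount(); i++) {
  //     HdfsFileStatusProto status = listing.getPartialListing(i);
  //     // ... consume status ...
  //   }
  //   int remaining = listing.getRemainingEntries();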
  /**
   * <pre>
   **
   * Directory listing
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.DirectoryListingProto}
   */
  public static final class DirectoryListingProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.DirectoryListingProto)
      DirectoryListingProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use DirectoryListingProto.newBuilder() to construct.
    private DirectoryListingProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private DirectoryListingProto() {
      partialListing_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new DirectoryListingProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder.class);
    }

    private int bitField0_;
    public static final int PARTIALLISTING_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> partialListing_;
    /**
     * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> getPartialListingList() {
      return partialListing_;
    }
    /**
     * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> 
        getPartialListingOrBuilderList() {
      return partialListing_;
    }
    /**
     * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
     */
    @java.lang.Override
    public int getPartialListingCount() {
      return partialListing_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index) {
      return partialListing_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder(
        int index) {
      return partialListing_.get(index);
    }

    public static final int REMAININGENTRIES_FIELD_NUMBER = 2;
    private int remainingEntries_ = 0;
    /**
     * <code>required uint32 remainingEntries = 2;</code>
     * @return Whether the remainingEntries field is set.
     */
    @java.lang.Override
    public boolean hasRemainingEntries() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required uint32 remainingEntries = 2;</code>
     * @return The remainingEntries.
     */
    @java.lang.Override
    public int getRemainingEntries() {
      return remainingEntries_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasRemainingEntries()) {
        memoizedIsInitialized = 0;
        return false;
      }
      for (int i = 0; i < getPartialListingCount(); i++) {
        if (!getPartialListing(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      for (int i = 0; i < partialListing_.size(); i++) {
        output.writeMessage(1, partialListing_.get(i));
      }
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt32(2, remainingEntries_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      for (int i = 0; i < partialListing_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, partialListing_.get(i));
      }
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(2, remainingEntries_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto) obj;

      if (!getPartialListingList()
          .equals(other.getPartialListingList())) return false;
      if (hasRemainingEntries() != other.hasRemainingEntries()) return false;
      if (hasRemainingEntries()) {
        if (getRemainingEntries()
            != other.getRemainingEntries()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getPartialListingCount() > 0) {
        hash = (37 * hash) + PARTIALLISTING_FIELD_NUMBER;
        hash = (53 * hash) + getPartialListingList().hashCode();
      }
      if (hasRemainingEntries()) {
        hash = (37 * hash) + REMAININGENTRIES_FIELD_NUMBER;
        hash = (53 * hash) + getRemainingEntries();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Directory listing
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.DirectoryListingProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DirectoryListingProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        if (partialListingBuilder_ == null) {
          partialListing_ = java.util.Collections.emptyList();
        } else {
          partialListing_ = null;
          partialListingBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        remainingEntries_ = 0;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_DirectoryListingProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto result) {
        if (partialListingBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0)) {
            partialListing_ = java.util.Collections.unmodifiableList(partialListing_);
            bitField0_ = (bitField0_ & ~0x00000001);
          }
          result.partialListing_ = partialListing_;
        } else {
          result.partialListing_ = partialListingBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.remainingEntries_ = remainingEntries_;
          to_bitField0_ |= 0x00000001;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto.getDefaultInstance()) return this;
        if (partialListingBuilder_ == null) {
          if (!other.partialListing_.isEmpty()) {
            if (partialListing_.isEmpty()) {
              partialListing_ = other.partialListing_;
              bitField0_ = (bitField0_ & ~0x00000001);
            } else {
              ensurePartialListingIsMutable();
              partialListing_.addAll(other.partialListing_);
            }
            onChanged();
          }
        } else {
          if (!other.partialListing_.isEmpty()) {
            if (partialListingBuilder_.isEmpty()) {
              partialListingBuilder_.dispose();
              partialListingBuilder_ = null;
              partialListing_ = other.partialListing_;
              bitField0_ = (bitField0_ & ~0x00000001);
              partialListingBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getPartialListingFieldBuilder() : null;
            } else {
              partialListingBuilder_.addAllMessages(other.partialListing_);
            }
          }
        }
        if (other.hasRemainingEntries()) {
          setRemainingEntries(other.getRemainingEntries());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasRemainingEntries()) {
          return false;
        }
        for (int i = 0; i < getPartialListingCount(); i++) {
          if (!getPartialListing(i).isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.PARSER,
                        extensionRegistry);
                if (partialListingBuilder_ == null) {
                  ensurePartialListingIsMutable();
                  partialListing_.add(m);
                } else {
                  partialListingBuilder_.addMessage(m);
                }
                break;
              } // case 10
              case 16: {
                remainingEntries_ = input.readUInt32();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> partialListing_ =
        java.util.Collections.emptyList();
      private void ensurePartialListingIsMutable() {
        if (!((bitField0_ & 0x00000001) != 0)) {
          partialListing_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto>(partialListing_);
          bitField0_ |= 0x00000001;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> partialListingBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> getPartialListingList() {
        if (partialListingBuilder_ == null) {
          return java.util.Collections.unmodifiableList(partialListing_);
        } else {
          return partialListingBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public int getPartialListingCount() {
        if (partialListingBuilder_ == null) {
          return partialListing_.size();
        } else {
          return partialListingBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index) {
        if (partialListingBuilder_ == null) {
          return partialListing_.get(index);
        } else {
          return partialListingBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public Builder setPartialListing(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) {
        if (partialListingBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensurePartialListingIsMutable();
          partialListing_.set(index, value);
          onChanged();
        } else {
          partialListingBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public Builder setPartialListing(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) {
        if (partialListingBuilder_ == null) {
          ensurePartialListingIsMutable();
          partialListing_.set(index, builderForValue.build());
          onChanged();
        } else {
          partialListingBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public Builder addPartialListing(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) {
        if (partialListingBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensurePartialListingIsMutable();
          partialListing_.add(value);
          onChanged();
        } else {
          partialListingBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public Builder addPartialListing(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) {
        if (partialListingBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensurePartialListingIsMutable();
          partialListing_.add(index, value);
          onChanged();
        } else {
          partialListingBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public Builder addPartialListing(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) {
        if (partialListingBuilder_ == null) {
          ensurePartialListingIsMutable();
          partialListing_.add(builderForValue.build());
          onChanged();
        } else {
          partialListingBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public Builder addPartialListing(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) {
        if (partialListingBuilder_ == null) {
          ensurePartialListingIsMutable();
          partialListing_.add(index, builderForValue.build());
          onChanged();
        } else {
          partialListingBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public Builder addAllPartialListing(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> values) {
        if (partialListingBuilder_ == null) {
          ensurePartialListingIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, partialListing_);
          onChanged();
        } else {
          partialListingBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public Builder clearPartialListing() {
        if (partialListingBuilder_ == null) {
          partialListing_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
        } else {
          partialListingBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public Builder removePartialListing(int index) {
        if (partialListingBuilder_ == null) {
          ensurePartialListingIsMutable();
          partialListing_.remove(index);
          onChanged();
        } else {
          partialListingBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getPartialListingBuilder(
          int index) {
        return getPartialListingFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder(
          int index) {
        if (partialListingBuilder_ == null) {
          return partialListing_.get(index);
        } else {
          return partialListingBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> 
           getPartialListingOrBuilderList() {
        if (partialListingBuilder_ != null) {
          return partialListingBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(partialListing_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder addPartialListingBuilder() {
        return getPartialListingFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder addPartialListingBuilder(
          int index) {
        return getPartialListingFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder> 
           getPartialListingBuilderList() {
        return getPartialListingFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> 
          getPartialListingFieldBuilder() {
        if (partialListingBuilder_ == null) {
          partialListingBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>(
                  partialListing_,
                  ((bitField0_ & 0x00000001) != 0),
                  getParentForChildren(),
                  isClean());
          partialListing_ = null;
        }
        return partialListingBuilder_;
      }

      private int remainingEntries_ ;
      /**
       * <code>required uint32 remainingEntries = 2;</code>
       * @return Whether the remainingEntries field is set.
       */
      @java.lang.Override
      public boolean hasRemainingEntries() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required uint32 remainingEntries = 2;</code>
       * @return The remainingEntries.
       */
      @java.lang.Override
      public int getRemainingEntries() {
        return remainingEntries_;
      }
      /**
       * <code>required uint32 remainingEntries = 2;</code>
       * @param value The remainingEntries to set.
       * @return This builder for chaining.
       */
      public Builder setRemainingEntries(int value) {
        remainingEntries_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 remainingEntries = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearRemainingEntries() {
        bitField0_ = (bitField0_ & ~0x00000002);
        remainingEntries_ = 0;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DirectoryListingProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.DirectoryListingProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DirectoryListingProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DirectoryListingProto>() {
      @java.lang.Override
      public DirectoryListingProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<DirectoryListingProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<DirectoryListingProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
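  // Illustrative round-trip sketch (not generated): serializing and re-parsing a
  // DirectoryListingProto. "statuses" is a hypothetical List<HdfsFileStatusProto>;
  // remainingEntries is a required field, so it must be set before build().
  //
  //   DirectoryListingProto listing = DirectoryListingProto.newBuilder()
  //       .addAllPartialListing(statuses)   // repeated HdfsFileStatusProto field 1
  //       .setRemainingEntries(0)           // required uint32 field 2
  //       .build();
  //   byte[] wire = listing.toByteArray();
  //   DirectoryListingProto parsed = DirectoryListingProto.parseFrom(wire);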

  public interface RemoteExceptionProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.RemoteExceptionProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required string className = 1;</code>
     * @return Whether the className field is set.
     */
    boolean hasClassName();
    /**
     * <code>required string className = 1;</code>
     * @return The className.
     */
    java.lang.String getClassName();
    /**
     * <code>required string className = 1;</code>
     * @return The bytes for className.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getClassNameBytes();

    /**
     * <code>optional string message = 2;</code>
     * @return Whether the message field is set.
     */
    boolean hasMessage();
    /**
     * <code>optional string message = 2;</code>
     * @return The message.
     */
    java.lang.String getMessage();
    /**
     * <code>optional string message = 2;</code>
     * @return The bytes for message.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getMessageBytes();
  }
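  // Illustrative sketch (not generated): populating a RemoteExceptionProto.
  // className is required and message is optional, per the accessors above. The
  // setter names follow the standard protoc pattern for string fields; the
  // literal values are hypothetical.
  //
  //   RemoteExceptionProto ex = RemoteExceptionProto.newBuilder()
  //       .setClassName("java.io.FileNotFoundException")   // required string field 1
  //       .setMessage("/path/does/not/exist")              // optional string field 2
  //       .build();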
  /**
   * Protobuf type {@code hadoop.hdfs.RemoteExceptionProto}
   */
  public static final class RemoteExceptionProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.RemoteExceptionProto)
      RemoteExceptionProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use RemoteExceptionProto.newBuilder() to construct.
    private RemoteExceptionProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private RemoteExceptionProto() {
      className_ = "";
      message_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new RemoteExceptionProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteExceptionProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteExceptionProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder.class);
    }

    private int bitField0_;
    public static final int CLASSNAME_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object className_ = "";
    /**
     * <code>required string className = 1;</code>
     * @return Whether the className field is set.
     */
    @java.lang.Override
    public boolean hasClassName() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required string className = 1;</code>
     * @return The className.
     */
    @java.lang.Override
    public java.lang.String getClassName() {
      java.lang.Object ref = className_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          className_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string className = 1;</code>
     * @return The bytes for className.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getClassNameBytes() {
      java.lang.Object ref = className_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        className_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int MESSAGE_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object message_ = "";
    /**
     * <code>optional string message = 2;</code>
     * @return Whether the message field is set.
     */
    @java.lang.Override
    public boolean hasMessage() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional string message = 2;</code>
     * @return The message.
     */
    @java.lang.Override
    public java.lang.String getMessage() {
      java.lang.Object ref = message_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          message_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string message = 2;</code>
     * @return The bytes for message.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getMessageBytes() {
      java.lang.Object ref = message_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        message_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasClassName()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, className_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, message_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, className_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, message_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto) obj;

      if (hasClassName() != other.hasClassName()) return false;
      if (hasClassName()) {
        if (!getClassName()
            .equals(other.getClassName())) return false;
      }
      if (hasMessage() != other.hasMessage()) return false;
      if (hasMessage()) {
        if (!getMessage()
            .equals(other.getMessage())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasClassName()) {
        hash = (37 * hash) + CLASSNAME_FIELD_NUMBER;
        hash = (53 * hash) + getClassName().hashCode();
      }
      if (hasMessage()) {
        hash = (37 * hash) + MESSAGE_FIELD_NUMBER;
        hash = (53 * hash) + getMessage().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.RemoteExceptionProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.RemoteExceptionProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteExceptionProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteExceptionProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        className_ = "";
        message_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RemoteExceptionProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.className_ = className_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.message_ = message_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.getDefaultInstance()) return this;
        if (other.hasClassName()) {
          className_ = other.className_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasMessage()) {
          message_ = other.message_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasClassName()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                className_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                message_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object className_ = "";
      /**
       * <code>required string className = 1;</code>
       * @return Whether the className field is set.
       */
      public boolean hasClassName() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required string className = 1;</code>
       * @return The className.
       */
      public java.lang.String getClassName() {
        java.lang.Object ref = className_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            className_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string className = 1;</code>
       * @return The bytes for className.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getClassNameBytes() {
        java.lang.Object ref = className_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          className_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string className = 1;</code>
       * @param value The className to set.
       * @return This builder for chaining.
       */
      public Builder setClassName(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        className_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required string className = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearClassName() {
        className_ = getDefaultInstance().getClassName();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>required string className = 1;</code>
       * @param value The bytes for className to set.
       * @return This builder for chaining.
       */
      public Builder setClassNameBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        className_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private java.lang.Object message_ = "";
      /**
       * <code>optional string message = 2;</code>
       * @return Whether the message field is set.
       */
      public boolean hasMessage() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional string message = 2;</code>
       * @return The message.
       */
      public java.lang.String getMessage() {
        java.lang.Object ref = message_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            message_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string message = 2;</code>
       * @return The bytes for message.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getMessageBytes() {
        java.lang.Object ref = message_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          message_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string message = 2;</code>
       * @param value The message to set.
       * @return This builder for chaining.
       */
      public Builder setMessage(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        message_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional string message = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearMessage() {
        message_ = getDefaultInstance().getMessage();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <code>optional string message = 2;</code>
       * @param value The bytes for message to set.
       * @return This builder for chaining.
       */
      public Builder setMessageBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        message_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RemoteExceptionProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.RemoteExceptionProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<RemoteExceptionProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<RemoteExceptionProto>() {
      @java.lang.Override
      public RemoteExceptionProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<RemoteExceptionProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<RemoteExceptionProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
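
  /*
   * A minimal usage sketch for the RemoteExceptionProto message defined above,
   * relying on the builder and accessors generated in this file; the class name
   * and message strings are illustrative values, and parseFrom(byte[]) is
   * assumed from the standard generated parse methods for this message.
   *
   *   RemoteExceptionProto ex = RemoteExceptionProto.newBuilder()
   *       .setClassName("java.io.FileNotFoundException")  // required string className = 1
   *       .setMessage("/user/foo does not exist")         // optional string message = 2
   *       .build();                                        // throws if className is unset
   *   byte[] wire = ex.toByteArray();
   *   RemoteExceptionProto parsed = RemoteExceptionProto.parseFrom(wire);
   */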

  public interface BatchedDirectoryListingProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.BatchedDirectoryListingProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> 
        getPartialListingList();
    /**
     * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index);
    /**
     * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
     */
    int getPartialListingCount();
    /**
     * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> 
        getPartialListingOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder(
        int index);

    /**
     * <code>required uint32 parentIdx = 2;</code>
     * @return Whether the parentIdx field is set.
     */
    boolean hasParentIdx();
    /**
     * <code>required uint32 parentIdx = 2;</code>
     * @return The parentIdx.
     */
    int getParentIdx();

    /**
     * <code>optional .hadoop.hdfs.RemoteExceptionProto exception = 3;</code>
     * @return Whether the exception field is set.
     */
    boolean hasException();
    /**
     * <code>optional .hadoop.hdfs.RemoteExceptionProto exception = 3;</code>
     * @return The exception.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto getException();
    /**
     * <code>optional .hadoop.hdfs.RemoteExceptionProto exception = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProtoOrBuilder getExceptionOrBuilder();
  }
  /**
   * <pre>
   * Directory listing result for a batched listing call.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.BatchedDirectoryListingProto}
   */
  public static final class BatchedDirectoryListingProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.BatchedDirectoryListingProto)
      BatchedDirectoryListingProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use BatchedDirectoryListingProto.newBuilder() to construct.
    private BatchedDirectoryListingProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private BatchedDirectoryListingProto() {
      partialListing_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new BatchedDirectoryListingProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedDirectoryListingProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedDirectoryListingProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto.Builder.class);
    }

    private int bitField0_;
    public static final int PARTIALLISTING_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> partialListing_;
    /**
     * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> getPartialListingList() {
      return partialListing_;
    }
    /**
     * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> 
        getPartialListingOrBuilderList() {
      return partialListing_;
    }
    /**
     * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
     */
    @java.lang.Override
    public int getPartialListingCount() {
      return partialListing_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index) {
      return partialListing_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder(
        int index) {
      return partialListing_.get(index);
    }

    public static final int PARENTIDX_FIELD_NUMBER = 2;
    private int parentIdx_ = 0;
    /**
     * <code>required uint32 parentIdx = 2;</code>
     * @return Whether the parentIdx field is set.
     */
    @java.lang.Override
    public boolean hasParentIdx() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required uint32 parentIdx = 2;</code>
     * @return The parentIdx.
     */
    @java.lang.Override
    public int getParentIdx() {
      return parentIdx_;
    }

    public static final int EXCEPTION_FIELD_NUMBER = 3;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto exception_;
    /**
     * <code>optional .hadoop.hdfs.RemoteExceptionProto exception = 3;</code>
     * @return Whether the exception field is set.
     */
    @java.lang.Override
    public boolean hasException() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.RemoteExceptionProto exception = 3;</code>
     * @return The exception.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto getException() {
      return exception_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.getDefaultInstance() : exception_;
    }
    /**
     * <code>optional .hadoop.hdfs.RemoteExceptionProto exception = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProtoOrBuilder getExceptionOrBuilder() {
      return exception_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.getDefaultInstance() : exception_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasParentIdx()) {
        memoizedIsInitialized = 0;
        return false;
      }
      for (int i = 0; i < getPartialListingCount(); i++) {
        if (!getPartialListing(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasException()) {
        if (!getException().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      for (int i = 0; i < partialListing_.size(); i++) {
        output.writeMessage(1, partialListing_.get(i));
      }
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt32(2, parentIdx_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeMessage(3, getException());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      for (int i = 0; i < partialListing_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, partialListing_.get(i));
      }
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(2, parentIdx_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(3, getException());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto) obj;

      if (!getPartialListingList()
          .equals(other.getPartialListingList())) return false;
      if (hasParentIdx() != other.hasParentIdx()) return false;
      if (hasParentIdx()) {
        if (getParentIdx()
            != other.getParentIdx()) return false;
      }
      if (hasException() != other.hasException()) return false;
      if (hasException()) {
        if (!getException()
            .equals(other.getException())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getPartialListingCount() > 0) {
        hash = (37 * hash) + PARTIALLISTING_FIELD_NUMBER;
        hash = (53 * hash) + getPartialListingList().hashCode();
      }
      if (hasParentIdx()) {
        hash = (37 * hash) + PARENTIDX_FIELD_NUMBER;
        hash = (53 * hash) + getParentIdx();
      }
      if (hasException()) {
        hash = (37 * hash) + EXCEPTION_FIELD_NUMBER;
        hash = (53 * hash) + getException().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
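
    /*
     * A sketch of round-tripping through the delimited parse entry point above;
     * writeDelimitedTo(...) is assumed from the inherited protobuf runtime API,
     * the stream is just an in-memory buffer for illustration, and IOException
     * handling is omitted.
     *
     *   BatchedDirectoryListingProto msg =
     *       BatchedDirectoryListingProto.newBuilder().setParentIdx(0).build();
     *   java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
     *   msg.writeDelimitedTo(out);                       // length-prefixed framing
     *   java.io.InputStream in = new java.io.ByteArrayInputStream(out.toByteArray());
     *   BatchedDirectoryListingProto copy =
     *       BatchedDirectoryListingProto.parseDelimitedFrom(in);
     */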

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     * Directory listing result for a batched listing call.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.BatchedDirectoryListingProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.BatchedDirectoryListingProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedDirectoryListingProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedDirectoryListingProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getPartialListingFieldBuilder();
          getExceptionFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        if (partialListingBuilder_ == null) {
          partialListing_ = java.util.Collections.emptyList();
        } else {
          partialListing_ = null;
          partialListingBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        parentIdx_ = 0;
        exception_ = null;
        if (exceptionBuilder_ != null) {
          exceptionBuilder_.dispose();
          exceptionBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BatchedDirectoryListingProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto result) {
        if (partialListingBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0)) {
            partialListing_ = java.util.Collections.unmodifiableList(partialListing_);
            bitField0_ = (bitField0_ & ~0x00000001);
          }
          result.partialListing_ = partialListing_;
        } else {
          result.partialListing_ = partialListingBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.parentIdx_ = parentIdx_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.exception_ = exceptionBuilder_ == null
              ? exception_
              : exceptionBuilder_.build();
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto.getDefaultInstance()) return this;
        if (partialListingBuilder_ == null) {
          if (!other.partialListing_.isEmpty()) {
            if (partialListing_.isEmpty()) {
              partialListing_ = other.partialListing_;
              bitField0_ = (bitField0_ & ~0x00000001);
            } else {
              ensurePartialListingIsMutable();
              partialListing_.addAll(other.partialListing_);
            }
            onChanged();
          }
        } else {
          if (!other.partialListing_.isEmpty()) {
            if (partialListingBuilder_.isEmpty()) {
              partialListingBuilder_.dispose();
              partialListingBuilder_ = null;
              partialListing_ = other.partialListing_;
              bitField0_ = (bitField0_ & ~0x00000001);
              partialListingBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getPartialListingFieldBuilder() : null;
            } else {
              partialListingBuilder_.addAllMessages(other.partialListing_);
            }
          }
        }
        if (other.hasParentIdx()) {
          setParentIdx(other.getParentIdx());
        }
        if (other.hasException()) {
          mergeException(other.getException());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasParentIdx()) {
          return false;
        }
        for (int i = 0; i < getPartialListingCount(); i++) {
          if (!getPartialListing(i).isInitialized()) {
            return false;
          }
        }
        if (hasException()) {
          if (!getException().isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.PARSER,
                        extensionRegistry);
                if (partialListingBuilder_ == null) {
                  ensurePartialListingIsMutable();
                  partialListing_.add(m);
                } else {
                  partialListingBuilder_.addMessage(m);
                }
                break;
              } // case 10
              case 16: {
                parentIdx_ = input.readUInt32();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 26: {
                input.readMessage(
                    getExceptionFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> partialListing_ =
        java.util.Collections.emptyList();
      private void ensurePartialListingIsMutable() {
        if (!((bitField0_ & 0x00000001) != 0)) {
          partialListing_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto>(partialListing_);
          bitField0_ |= 0x00000001;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> partialListingBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> getPartialListingList() {
        if (partialListingBuilder_ == null) {
          return java.util.Collections.unmodifiableList(partialListing_);
        } else {
          return partialListingBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public int getPartialListingCount() {
        if (partialListingBuilder_ == null) {
          return partialListing_.size();
        } else {
          return partialListingBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getPartialListing(int index) {
        if (partialListingBuilder_ == null) {
          return partialListing_.get(index);
        } else {
          return partialListingBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public Builder setPartialListing(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) {
        if (partialListingBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensurePartialListingIsMutable();
          partialListing_.set(index, value);
          onChanged();
        } else {
          partialListingBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public Builder setPartialListing(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) {
        if (partialListingBuilder_ == null) {
          ensurePartialListingIsMutable();
          partialListing_.set(index, builderForValue.build());
          onChanged();
        } else {
          partialListingBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public Builder addPartialListing(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) {
        if (partialListingBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensurePartialListingIsMutable();
          partialListing_.add(value);
          onChanged();
        } else {
          partialListingBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public Builder addPartialListing(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) {
        if (partialListingBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensurePartialListingIsMutable();
          partialListing_.add(index, value);
          onChanged();
        } else {
          partialListingBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public Builder addPartialListing(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) {
        if (partialListingBuilder_ == null) {
          ensurePartialListingIsMutable();
          partialListing_.add(builderForValue.build());
          onChanged();
        } else {
          partialListingBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public Builder addPartialListing(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) {
        if (partialListingBuilder_ == null) {
          ensurePartialListingIsMutable();
          partialListing_.add(index, builderForValue.build());
          onChanged();
        } else {
          partialListingBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public Builder addAllPartialListing(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto> values) {
        if (partialListingBuilder_ == null) {
          ensurePartialListingIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, partialListing_);
          onChanged();
        } else {
          partialListingBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public Builder clearPartialListing() {
        if (partialListingBuilder_ == null) {
          partialListing_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
        } else {
          partialListingBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public Builder removePartialListing(int index) {
        if (partialListingBuilder_ == null) {
          ensurePartialListingIsMutable();
          partialListing_.remove(index);
          onChanged();
        } else {
          partialListingBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getPartialListingBuilder(
          int index) {
        return getPartialListingFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getPartialListingOrBuilder(
          int index) {
        if (partialListingBuilder_ == null) {
          return partialListing_.get(index);
        } else {
          return partialListingBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> 
           getPartialListingOrBuilderList() {
        if (partialListingBuilder_ != null) {
          return partialListingBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(partialListing_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder addPartialListingBuilder() {
        return getPartialListingFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder addPartialListingBuilder(
          int index) {
        return getPartialListingFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.HdfsFileStatusProto partialListing = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder> 
           getPartialListingBuilderList() {
        return getPartialListingFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> 
          getPartialListingFieldBuilder() {
        if (partialListingBuilder_ == null) {
          partialListingBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>(
                  partialListing_,
                  ((bitField0_ & 0x00000001) != 0),
                  getParentForChildren(),
                  isClean());
          partialListing_ = null;
        }
        return partialListingBuilder_;
      }
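
      /*
       * A brief sketch of in-place editing through the repeated-field builder
       * accessors above; the nested builder returned here can be populated with
       * whatever HdfsFileStatusProto setters apply (not shown in this sketch).
       *
       *   BatchedDirectoryListingProto.Builder b = BatchedDirectoryListingProto.newBuilder();
       *   HdfsFileStatusProto.Builder entry = b.addPartialListingBuilder(); // appends a nested builder
       *   b.getPartialListingBuilder(0);   // same nested builder, addressed by index
       *   b.getPartialListingCount();      // == 1
       */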

      private int parentIdx_ ;
      /**
       * <code>required uint32 parentIdx = 2;</code>
       * @return Whether the parentIdx field is set.
       */
      @java.lang.Override
      public boolean hasParentIdx() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required uint32 parentIdx = 2;</code>
       * @return The parentIdx.
       */
      @java.lang.Override
      public int getParentIdx() {
        return parentIdx_;
      }
      /**
       * <code>required uint32 parentIdx = 2;</code>
       * @param value The parentIdx to set.
       * @return This builder for chaining.
       */
      public Builder setParentIdx(int value) {
        parentIdx_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 parentIdx = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearParentIdx() {
        bitField0_ = (bitField0_ & ~0x00000002);
        parentIdx_ = 0;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto exception_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProtoOrBuilder> exceptionBuilder_;
      /**
       * <code>optional .hadoop.hdfs.RemoteExceptionProto exception = 3;</code>
       * @return Whether the exception field is set.
       */
      public boolean hasException() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.RemoteExceptionProto exception = 3;</code>
       * @return The exception.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto getException() {
        if (exceptionBuilder_ == null) {
          return exception_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.getDefaultInstance() : exception_;
        } else {
          return exceptionBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.RemoteExceptionProto exception = 3;</code>
       */
      public Builder setException(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto value) {
        if (exceptionBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          exception_ = value;
        } else {
          exceptionBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.RemoteExceptionProto exception = 3;</code>
       */
      public Builder setException(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder builderForValue) {
        if (exceptionBuilder_ == null) {
          exception_ = builderForValue.build();
        } else {
          exceptionBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.RemoteExceptionProto exception = 3;</code>
       */
      public Builder mergeException(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto value) {
        if (exceptionBuilder_ == null) {
          if (((bitField0_ & 0x00000004) != 0) &&
            exception_ != null &&
            exception_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.getDefaultInstance()) {
            getExceptionBuilder().mergeFrom(value);
          } else {
            exception_ = value;
          }
        } else {
          exceptionBuilder_.mergeFrom(value);
        }
        if (exception_ != null) {
          bitField0_ |= 0x00000004;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.RemoteExceptionProto exception = 3;</code>
       */
      public Builder clearException() {
        bitField0_ = (bitField0_ & ~0x00000004);
        exception_ = null;
        if (exceptionBuilder_ != null) {
          exceptionBuilder_.dispose();
          exceptionBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.RemoteExceptionProto exception = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder getExceptionBuilder() {
        bitField0_ |= 0x00000004;
        onChanged();
        return getExceptionFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.RemoteExceptionProto exception = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProtoOrBuilder getExceptionOrBuilder() {
        if (exceptionBuilder_ != null) {
          return exceptionBuilder_.getMessageOrBuilder();
        } else {
          return exception_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.getDefaultInstance() : exception_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.RemoteExceptionProto exception = 3;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProtoOrBuilder> 
          getExceptionFieldBuilder() {
        if (exceptionBuilder_ == null) {
          exceptionBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteExceptionProtoOrBuilder>(
                  getException(),
                  getParentForChildren(),
                  isClean());
          exception_ = null;
        }
        return exceptionBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BatchedDirectoryListingProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.BatchedDirectoryListingProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<BatchedDirectoryListingProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<BatchedDirectoryListingProto>() {
      @java.lang.Override
      public BatchedDirectoryListingProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<BatchedDirectoryListingProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<BatchedDirectoryListingProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BatchedDirectoryListingProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
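
  // Hand-written illustration, not protoc output: a minimal sketch of how a
  // caller might round-trip a BatchedDirectoryListingProto through the
  // generated parse entry points. The byte[] argument is assumed to hold a
  // previously serialized BatchedDirectoryListingProto.
  private static BatchedDirectoryListingProto exampleRoundTripBatchedListing(
      byte[] serialized)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    // parseFrom(byte[]) goes through the PARSER defined above and rejects
    // input whose required fields are missing.
    BatchedDirectoryListingProto parsed =
        BatchedDirectoryListingProto.parseFrom(serialized);
    // toByteArray() re-serializes via writeTo/getSerializedSize; parsing that
    // output again yields a message equal to the first (equals() is per-field).
    return BatchedDirectoryListingProto.parseFrom(parsed.toByteArray());
  }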

  public interface SnapshottableDirectoryStatusProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshottableDirectoryStatusProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
     * @return Whether the dirStatus field is set.
     */
    boolean hasDirStatus();
    /**
     * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
     * @return The dirStatus.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDirStatus();
    /**
     * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getDirStatusOrBuilder();

    /**
     * <pre>
     * Fields specific for snapshottable directory
     * </pre>
     *
     * <code>required uint32 snapshot_quota = 2;</code>
     * @return Whether the snapshotQuota field is set.
     */
    boolean hasSnapshotQuota();
    /**
     * <pre>
     * Fields specific for snapshottable directory
     * </pre>
     *
     * <code>required uint32 snapshot_quota = 2;</code>
     * @return The snapshotQuota.
     */
    int getSnapshotQuota();

    /**
     * <code>required uint32 snapshot_number = 3;</code>
     * @return Whether the snapshotNumber field is set.
     */
    boolean hasSnapshotNumber();
    /**
     * <code>required uint32 snapshot_number = 3;</code>
     * @return The snapshotNumber.
     */
    int getSnapshotNumber();

    /**
     * <code>required bytes parent_fullpath = 4;</code>
     * @return Whether the parentFullpath field is set.
     */
    boolean hasParentFullpath();
    /**
     * <code>required bytes parent_fullpath = 4;</code>
     * @return The parentFullpath.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getParentFullpath();
  }
  /**
   * <pre>
   **
   * Status of a snapshottable directory: besides the normal information for
   * a directory status, it also includes the snapshot quota, the number of
   * snapshots, and the full path of the parent directory.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.SnapshottableDirectoryStatusProto}
   */
  public static final class SnapshottableDirectoryStatusProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshottableDirectoryStatusProto)
      SnapshottableDirectoryStatusProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use SnapshottableDirectoryStatusProto.newBuilder() to construct.
    private SnapshottableDirectoryStatusProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private SnapshottableDirectoryStatusProto() {
      parentFullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new SnapshottableDirectoryStatusProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder.class);
    }

    private int bitField0_;
    public static final int DIRSTATUS_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto dirStatus_;
    /**
     * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
     * @return Whether the dirStatus field is set.
     */
    @java.lang.Override
    public boolean hasDirStatus() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
     * @return The dirStatus.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDirStatus() {
      return dirStatus_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance() : dirStatus_;
    }
    /**
     * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getDirStatusOrBuilder() {
      return dirStatus_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance() : dirStatus_;
    }

    public static final int SNAPSHOT_QUOTA_FIELD_NUMBER = 2;
    private int snapshotQuota_ = 0;
    /**
     * <pre>
     * Fields specific for snapshottable directory
     * </pre>
     *
     * <code>required uint32 snapshot_quota = 2;</code>
     * @return Whether the snapshotQuota field is set.
     */
    @java.lang.Override
    public boolean hasSnapshotQuota() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <pre>
     * Fields specific for snapshottable directory
     * </pre>
     *
     * <code>required uint32 snapshot_quota = 2;</code>
     * @return The snapshotQuota.
     */
    @java.lang.Override
    public int getSnapshotQuota() {
      return snapshotQuota_;
    }

    public static final int SNAPSHOT_NUMBER_FIELD_NUMBER = 3;
    private int snapshotNumber_ = 0;
    /**
     * <code>required uint32 snapshot_number = 3;</code>
     * @return Whether the snapshotNumber field is set.
     */
    @java.lang.Override
    public boolean hasSnapshotNumber() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required uint32 snapshot_number = 3;</code>
     * @return The snapshotNumber.
     */
    @java.lang.Override
    public int getSnapshotNumber() {
      return snapshotNumber_;
    }

    public static final int PARENT_FULLPATH_FIELD_NUMBER = 4;
    private org.apache.hadoop.thirdparty.protobuf.ByteString parentFullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>required bytes parent_fullpath = 4;</code>
     * @return Whether the parentFullpath field is set.
     */
    @java.lang.Override
    public boolean hasParentFullpath() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>required bytes parent_fullpath = 4;</code>
     * @return The parentFullpath.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getParentFullpath() {
      return parentFullpath_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasDirStatus()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSnapshotQuota()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSnapshotNumber()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasParentFullpath()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getDirStatus().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getDirStatus());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt32(2, snapshotQuota_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeUInt32(3, snapshotNumber_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeBytes(4, parentFullpath_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getDirStatus());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(2, snapshotQuota_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(3, snapshotNumber_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(4, parentFullpath_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto) obj;

      if (hasDirStatus() != other.hasDirStatus()) return false;
      if (hasDirStatus()) {
        if (!getDirStatus()
            .equals(other.getDirStatus())) return false;
      }
      if (hasSnapshotQuota() != other.hasSnapshotQuota()) return false;
      if (hasSnapshotQuota()) {
        if (getSnapshotQuota()
            != other.getSnapshotQuota()) return false;
      }
      if (hasSnapshotNumber() != other.hasSnapshotNumber()) return false;
      if (hasSnapshotNumber()) {
        if (getSnapshotNumber()
            != other.getSnapshotNumber()) return false;
      }
      if (hasParentFullpath() != other.hasParentFullpath()) return false;
      if (hasParentFullpath()) {
        if (!getParentFullpath()
            .equals(other.getParentFullpath())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasDirStatus()) {
        hash = (37 * hash) + DIRSTATUS_FIELD_NUMBER;
        hash = (53 * hash) + getDirStatus().hashCode();
      }
      if (hasSnapshotQuota()) {
        hash = (37 * hash) + SNAPSHOT_QUOTA_FIELD_NUMBER;
        hash = (53 * hash) + getSnapshotQuota();
      }
      if (hasSnapshotNumber()) {
        hash = (37 * hash) + SNAPSHOT_NUMBER_FIELD_NUMBER;
        hash = (53 * hash) + getSnapshotNumber();
      }
      if (hasParentFullpath()) {
        hash = (37 * hash) + PARENT_FULLPATH_FIELD_NUMBER;
        hash = (53 * hash) + getParentFullpath().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Status of a snapshottable directory: besides the normal information for
     * a directory status, it also includes the snapshot quota, the number of
     * snapshots, and the full path of the parent directory.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.SnapshottableDirectoryStatusProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshottableDirectoryStatusProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getDirStatusFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        dirStatus_ = null;
        if (dirStatusBuilder_ != null) {
          dirStatusBuilder_.dispose();
          dirStatusBuilder_ = null;
        }
        snapshotQuota_ = 0;
        snapshotNumber_ = 0;
        parentFullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.dirStatus_ = dirStatusBuilder_ == null
              ? dirStatus_
              : dirStatusBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.snapshotQuota_ = snapshotQuota_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.snapshotNumber_ = snapshotNumber_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.parentFullpath_ = parentFullpath_;
          to_bitField0_ |= 0x00000008;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.getDefaultInstance()) return this;
        if (other.hasDirStatus()) {
          mergeDirStatus(other.getDirStatus());
        }
        if (other.hasSnapshotQuota()) {
          setSnapshotQuota(other.getSnapshotQuota());
        }
        if (other.hasSnapshotNumber()) {
          setSnapshotNumber(other.getSnapshotNumber());
        }
        if (other.hasParentFullpath()) {
          setParentFullpath(other.getParentFullpath());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasDirStatus()) {
          return false;
        }
        if (!hasSnapshotQuota()) {
          return false;
        }
        if (!hasSnapshotNumber()) {
          return false;
        }
        if (!hasParentFullpath()) {
          return false;
        }
        if (!getDirStatus().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getDirStatusFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                snapshotQuota_ = input.readUInt32();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                snapshotNumber_ = input.readUInt32();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 34: {
                parentFullpath_ = input.readBytes();
                bitField0_ |= 0x00000008;
                break;
              } // case 34
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto dirStatus_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> dirStatusBuilder_;
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
       * @return Whether the dirStatus field is set.
       */
      public boolean hasDirStatus() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
       * @return The dirStatus.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDirStatus() {
        if (dirStatusBuilder_ == null) {
          return dirStatus_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance() : dirStatus_;
        } else {
          return dirStatusBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
       */
      public Builder setDirStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) {
        if (dirStatusBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          dirStatus_ = value;
        } else {
          dirStatusBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
       */
      public Builder setDirStatus(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) {
        if (dirStatusBuilder_ == null) {
          dirStatus_ = builderForValue.build();
        } else {
          dirStatusBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
       */
      public Builder mergeDirStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) {
        if (dirStatusBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            dirStatus_ != null &&
            dirStatus_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) {
            getDirStatusBuilder().mergeFrom(value);
          } else {
            dirStatus_ = value;
          }
        } else {
          dirStatusBuilder_.mergeFrom(value);
        }
        if (dirStatus_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
       */
      public Builder clearDirStatus() {
        bitField0_ = (bitField0_ & ~0x00000001);
        dirStatus_ = null;
        if (dirStatusBuilder_ != null) {
          dirStatusBuilder_.dispose();
          dirStatusBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getDirStatusBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getDirStatusFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getDirStatusOrBuilder() {
        if (dirStatusBuilder_ != null) {
          return dirStatusBuilder_.getMessageOrBuilder();
        } else {
          return dirStatus_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance() : dirStatus_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> 
          getDirStatusFieldBuilder() {
        if (dirStatusBuilder_ == null) {
          dirStatusBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>(
                  getDirStatus(),
                  getParentForChildren(),
                  isClean());
          dirStatus_ = null;
        }
        return dirStatusBuilder_;
      }

      private int snapshotQuota_ ;
      /**
       * <pre>
       * Fields specific for snapshottable directory
       * </pre>
       *
       * <code>required uint32 snapshot_quota = 2;</code>
       * @return Whether the snapshotQuota field is set.
       */
      @java.lang.Override
      public boolean hasSnapshotQuota() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       * Fields specific for snapshottable directory
       * </pre>
       *
       * <code>required uint32 snapshot_quota = 2;</code>
       * @return The snapshotQuota.
       */
      @java.lang.Override
      public int getSnapshotQuota() {
        return snapshotQuota_;
      }
      /**
       * <pre>
       * Fields specific for snapshottable directory
       * </pre>
       *
       * <code>required uint32 snapshot_quota = 2;</code>
       * @param value The snapshotQuota to set.
       * @return This builder for chaining.
       */
      public Builder setSnapshotQuota(int value) {

        snapshotQuota_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Fields specific for snapshottable directory
       * </pre>
       *
       * <code>required uint32 snapshot_quota = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearSnapshotQuota() {
        bitField0_ = (bitField0_ & ~0x00000002);
        snapshotQuota_ = 0;
        onChanged();
        return this;
      }

      private int snapshotNumber_ ;
      /**
       * <code>required uint32 snapshot_number = 3;</code>
       * @return Whether the snapshotNumber field is set.
       */
      @java.lang.Override
      public boolean hasSnapshotNumber() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required uint32 snapshot_number = 3;</code>
       * @return The snapshotNumber.
       */
      @java.lang.Override
      public int getSnapshotNumber() {
        return snapshotNumber_;
      }
      /**
       * <code>required uint32 snapshot_number = 3;</code>
       * @param value The snapshotNumber to set.
       * @return This builder for chaining.
       */
      public Builder setSnapshotNumber(int value) {

        snapshotNumber_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 snapshot_number = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearSnapshotNumber() {
        bitField0_ = (bitField0_ & ~0x00000004);
        snapshotNumber_ = 0;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString parentFullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>required bytes parent_fullpath = 4;</code>
       * @return Whether the parentFullpath field is set.
       */
      @java.lang.Override
      public boolean hasParentFullpath() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>required bytes parent_fullpath = 4;</code>
       * @return The parentFullpath.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getParentFullpath() {
        return parentFullpath_;
      }
      /**
       * <code>required bytes parent_fullpath = 4;</code>
       * @param value The parentFullpath to set.
       * @return This builder for chaining.
       */
      public Builder setParentFullpath(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        parentFullpath_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>required bytes parent_fullpath = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearParentFullpath() {
        bitField0_ = (bitField0_ & ~0x00000008);
        parentFullpath_ = getDefaultInstance().getParentFullpath();
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshottableDirectoryStatusProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshottableDirectoryStatusProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<SnapshottableDirectoryStatusProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<SnapshottableDirectoryStatusProto>() {
      @java.lang.Override
      public SnapshottableDirectoryStatusProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<SnapshottableDirectoryStatusProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<SnapshottableDirectoryStatusProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
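
  // Hand-written illustration, not protoc output: a minimal sketch of building
  // a SnapshottableDirectoryStatusProto with the generated Builder. The
  // dirStatus and parentFullpath arguments are assumed to come from the caller
  // (dirStatus must itself be initialized); the quota and snapshot count below
  // are arbitrary example values.
  private static SnapshottableDirectoryStatusProto exampleSnapshottableStatus(
      HdfsFileStatusProto dirStatus,
      org.apache.hadoop.thirdparty.protobuf.ByteString parentFullpath) {
    // All four fields are required, so build() would throw the exception from
    // newUninitializedMessageException(result) if any of them were left unset.
    return SnapshottableDirectoryStatusProto.newBuilder()
        .setDirStatus(dirStatus)            // required message, field 1
        .setSnapshotQuota(65536)            // required uint32, field 2
        .setSnapshotNumber(0)               // required uint32, field 3
        .setParentFullpath(parentFullpath)  // required bytes, field 4
        .build();
  }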

  public interface SnapshotStatusProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotStatusProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
     * @return Whether the dirStatus field is set.
     */
    boolean hasDirStatus();
    /**
     * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
     * @return The dirStatus.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDirStatus();
    /**
     * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getDirStatusOrBuilder();

    /**
     * <pre>
     * Fields specific for snapshot directory
     * </pre>
     *
     * <code>required uint32 snapshotID = 2;</code>
     * @return Whether the snapshotID field is set.
     */
    boolean hasSnapshotID();
    /**
     * <pre>
     * Fields specific for snapshot directory
     * </pre>
     *
     * <code>required uint32 snapshotID = 2;</code>
     * @return The snapshotID.
     */
    int getSnapshotID();

    /**
     * <code>required bytes parent_fullpath = 3;</code>
     * @return Whether the parentFullpath field is set.
     */
    boolean hasParentFullpath();
    /**
     * <code>required bytes parent_fullpath = 3;</code>
     * @return The parentFullpath.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getParentFullpath();

    /**
     * <code>required bool isDeleted = 4;</code>
     * @return Whether the isDeleted field is set.
     */
    boolean hasIsDeleted();
    /**
     * <code>required bool isDeleted = 4;</code>
     * @return The isDeleted.
     */
    boolean getIsDeleted();
  }
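
  // Hand-written illustration, not protoc output: reading fields through the
  // SnapshotStatusProtoOrBuilder view, which both the message class below and
  // its Builder implement. Returning null for an unset parent path is just this
  // example's convention, not something the generated API requires.
  private static org.apache.hadoop.thirdparty.protobuf.ByteString
      exampleParentPathOrNull(SnapshotStatusProtoOrBuilder status) {
    // hasParentFullpath() reads the presence bit; on a partially populated
    // Builder even a required field can still be unset, so the check matters.
    return status.hasParentFullpath() ? status.getParentFullpath() : null;
  }
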
  /**
   * <pre>
   **
   * Status of a snapshot directory: besides the normal information for
   * a directory status, it also includes the snapshot ID and
   * the full path of the parent directory.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.SnapshotStatusProto}
   */
  public static final class SnapshotStatusProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotStatusProto)
      SnapshotStatusProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use SnapshotStatusProto.newBuilder() to construct.
    private SnapshotStatusProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private SnapshotStatusProto() {
      parentFullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new SnapshotStatusProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotStatusProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotStatusProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder.class);
    }

    private int bitField0_;
    public static final int DIRSTATUS_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto dirStatus_;
    /**
     * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
     * @return Whether the dirStatus field is set.
     */
    @java.lang.Override
    public boolean hasDirStatus() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
     * @return The dirStatus.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDirStatus() {
      return dirStatus_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance() : dirStatus_;
    }
    /**
     * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getDirStatusOrBuilder() {
      return dirStatus_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance() : dirStatus_;
    }

    public static final int SNAPSHOTID_FIELD_NUMBER = 2;
    private int snapshotID_ = 0;
    /**
     * <pre>
     * Fields specific for snapshot directory
     * </pre>
     *
     * <code>required uint32 snapshotID = 2;</code>
     * @return Whether the snapshotID field is set.
     */
    @java.lang.Override
    public boolean hasSnapshotID() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <pre>
     * Fields specific for snapshot directory
     * </pre>
     *
     * <code>required uint32 snapshotID = 2;</code>
     * @return The snapshotID.
     */
    @java.lang.Override
    public int getSnapshotID() {
      return snapshotID_;
    }

    public static final int PARENT_FULLPATH_FIELD_NUMBER = 3;
    private org.apache.hadoop.thirdparty.protobuf.ByteString parentFullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>required bytes parent_fullpath = 3;</code>
     * @return Whether the parentFullpath field is set.
     */
    @java.lang.Override
    public boolean hasParentFullpath() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required bytes parent_fullpath = 3;</code>
     * @return The parentFullpath.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getParentFullpath() {
      return parentFullpath_;
    }

    public static final int ISDELETED_FIELD_NUMBER = 4;
    private boolean isDeleted_ = false;
    /**
     * <code>required bool isDeleted = 4;</code>
     * @return Whether the isDeleted field is set.
     */
    @java.lang.Override
    public boolean hasIsDeleted() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>required bool isDeleted = 4;</code>
     * @return The isDeleted.
     */
    @java.lang.Override
    public boolean getIsDeleted() {
      return isDeleted_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasDirStatus()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSnapshotID()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasParentFullpath()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasIsDeleted()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getDirStatus().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getDirStatus());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt32(2, snapshotID_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeBytes(3, parentFullpath_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeBool(4, isDeleted_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getDirStatus());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(2, snapshotID_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(3, parentFullpath_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(4, isDeleted_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto) obj;

      if (hasDirStatus() != other.hasDirStatus()) return false;
      if (hasDirStatus()) {
        if (!getDirStatus()
            .equals(other.getDirStatus())) return false;
      }
      if (hasSnapshotID() != other.hasSnapshotID()) return false;
      if (hasSnapshotID()) {
        if (getSnapshotID()
            != other.getSnapshotID()) return false;
      }
      if (hasParentFullpath() != other.hasParentFullpath()) return false;
      if (hasParentFullpath()) {
        if (!getParentFullpath()
            .equals(other.getParentFullpath())) return false;
      }
      if (hasIsDeleted() != other.hasIsDeleted()) return false;
      if (hasIsDeleted()) {
        if (getIsDeleted()
            != other.getIsDeleted()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasDirStatus()) {
        hash = (37 * hash) + DIRSTATUS_FIELD_NUMBER;
        hash = (53 * hash) + getDirStatus().hashCode();
      }
      if (hasSnapshotID()) {
        hash = (37 * hash) + SNAPSHOTID_FIELD_NUMBER;
        hash = (53 * hash) + getSnapshotID();
      }
      if (hasParentFullpath()) {
        hash = (37 * hash) + PARENT_FULLPATH_FIELD_NUMBER;
        hash = (53 * hash) + getParentFullpath().hashCode();
      }
      if (hasIsDeleted()) {
        hash = (37 * hash) + ISDELETED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getIsDeleted());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Status of a snapshot directory: besides the normal information for
     * a directory status, it also includes the snapshot ID and
     * the full path of the parent directory.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.SnapshotStatusProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotStatusProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotStatusProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotStatusProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getDirStatusFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        dirStatus_ = null;
        if (dirStatusBuilder_ != null) {
          dirStatusBuilder_.dispose();
          dirStatusBuilder_ = null;
        }
        snapshotID_ = 0;
        parentFullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        isDeleted_ = false;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotStatusProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.dirStatus_ = dirStatusBuilder_ == null
              ? dirStatus_
              : dirStatusBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.snapshotID_ = snapshotID_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.parentFullpath_ = parentFullpath_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.isDeleted_ = isDeleted_;
          to_bitField0_ |= 0x00000008;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.getDefaultInstance()) return this;
        if (other.hasDirStatus()) {
          mergeDirStatus(other.getDirStatus());
        }
        if (other.hasSnapshotID()) {
          setSnapshotID(other.getSnapshotID());
        }
        if (other.hasParentFullpath()) {
          setParentFullpath(other.getParentFullpath());
        }
        if (other.hasIsDeleted()) {
          setIsDeleted(other.getIsDeleted());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasDirStatus()) {
          return false;
        }
        if (!hasSnapshotID()) {
          return false;
        }
        if (!hasParentFullpath()) {
          return false;
        }
        if (!hasIsDeleted()) {
          return false;
        }
        if (!getDirStatus().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getDirStatusFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                snapshotID_ = input.readUInt32();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 26: {
                parentFullpath_ = input.readBytes();
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              case 32: {
                isDeleted_ = input.readBool();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto dirStatus_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> dirStatusBuilder_;
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
       * @return Whether the dirStatus field is set.
       */
      public boolean hasDirStatus() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
       * @return The dirStatus.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto getDirStatus() {
        if (dirStatusBuilder_ == null) {
          return dirStatus_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance() : dirStatus_;
        } else {
          return dirStatusBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
       */
      public Builder setDirStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) {
        if (dirStatusBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          dirStatus_ = value;
        } else {
          dirStatusBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
       */
      public Builder setDirStatus(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder builderForValue) {
        if (dirStatusBuilder_ == null) {
          dirStatus_ = builderForValue.build();
        } else {
          dirStatusBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
       */
      public Builder mergeDirStatus(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto value) {
        if (dirStatusBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            dirStatus_ != null &&
            dirStatus_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance()) {
            getDirStatusBuilder().mergeFrom(value);
          } else {
            dirStatus_ = value;
          }
        } else {
          dirStatusBuilder_.mergeFrom(value);
        }
        if (dirStatus_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
       */
      public Builder clearDirStatus() {
        bitField0_ = (bitField0_ & ~0x00000001);
        dirStatus_ = null;
        if (dirStatusBuilder_ != null) {
          dirStatusBuilder_.dispose();
          dirStatusBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder getDirStatusBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getDirStatusFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder getDirStatusOrBuilder() {
        if (dirStatusBuilder_ != null) {
          return dirStatusBuilder_.getMessageOrBuilder();
        } else {
          return dirStatus_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.getDefaultInstance() : dirStatus_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.HdfsFileStatusProto dirStatus = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder> 
          getDirStatusFieldBuilder() {
        if (dirStatusBuilder_ == null) {
          dirStatusBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProtoOrBuilder>(
                  getDirStatus(),
                  getParentForChildren(),
                  isClean());
          dirStatus_ = null;
        }
        return dirStatusBuilder_;
      }

      private int snapshotID_ ;
      /**
       * <pre>
       * Fields specific for snapshot directory
       * </pre>
       *
       * <code>required uint32 snapshotID = 2;</code>
       * @return Whether the snapshotID field is set.
       */
      @java.lang.Override
      public boolean hasSnapshotID() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       * Fields specific for snapshot directory
       * </pre>
       *
       * <code>required uint32 snapshotID = 2;</code>
       * @return The snapshotID.
       */
      @java.lang.Override
      public int getSnapshotID() {
        return snapshotID_;
      }
      /**
       * <pre>
       * Fields specific for snapshot directory
       * </pre>
       *
       * <code>required uint32 snapshotID = 2;</code>
       * @param value The snapshotID to set.
       * @return This builder for chaining.
       */
      public Builder setSnapshotID(int value) {
        snapshotID_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Fields specific for snapshot directory
       * </pre>
       *
       * <code>required uint32 snapshotID = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearSnapshotID() {
        bitField0_ = (bitField0_ & ~0x00000002);
        snapshotID_ = 0;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString parentFullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>required bytes parent_fullpath = 3;</code>
       * @return Whether the parentFullpath field is set.
       */
      @java.lang.Override
      public boolean hasParentFullpath() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required bytes parent_fullpath = 3;</code>
       * @return The parentFullpath.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getParentFullpath() {
        return parentFullpath_;
      }
      /**
       * <code>required bytes parent_fullpath = 3;</code>
       * @param value The parentFullpath to set.
       * @return This builder for chaining.
       */
      public Builder setParentFullpath(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        parentFullpath_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required bytes parent_fullpath = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearParentFullpath() {
        bitField0_ = (bitField0_ & ~0x00000004);
        parentFullpath_ = getDefaultInstance().getParentFullpath();
        onChanged();
        return this;
      }

      private boolean isDeleted_ ;
      /**
       * <code>required bool isDeleted = 4;</code>
       * @return Whether the isDeleted field is set.
       */
      @java.lang.Override
      public boolean hasIsDeleted() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>required bool isDeleted = 4;</code>
       * @return The isDeleted.
       */
      @java.lang.Override
      public boolean getIsDeleted() {
        return isDeleted_;
      }
      /**
       * <code>required bool isDeleted = 4;</code>
       * @param value The isDeleted to set.
       * @return This builder for chaining.
       */
      public Builder setIsDeleted(boolean value) {
        isDeleted_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>required bool isDeleted = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearIsDeleted() {
        bitField0_ = (bitField0_ & ~0x00000008);
        isDeleted_ = false;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotStatusProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotStatusProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotStatusProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<SnapshotStatusProto>() {
      @java.lang.Override
      public SnapshotStatusProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotStatusProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotStatusProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
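
  // Illustrative sketch (not part of the protoc output): one way a caller might
  // construct a SnapshotStatusProto through its generated builder. The dirStatus
  // value and the parent path below are hypothetical placeholders.
  //
  //   HdfsProtos.SnapshotStatusProto status =
  //       HdfsProtos.SnapshotStatusProto.newBuilder()
  //           .setDirStatus(dirStatus)   // a previously built HdfsFileStatusProto (required)
  //           .setSnapshotID(7)
  //           .setParentFullpath(
  //               org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("/user/example"))
  //           .setIsDeleted(false)
  //           .build();                  // throws if a required field is unset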

  public interface SnapshottableDirectoryListingProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshottableDirectoryListingProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto> 
        getSnapshottableDirListingList();
    /**
     * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getSnapshottableDirListing(int index);
    /**
     * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
     */
    int getSnapshottableDirListingCount();
    /**
     * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder> 
        getSnapshottableDirListingOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder getSnapshottableDirListingOrBuilder(
        int index);
  }
  /**
   * <pre>
   **
   * Snapshottable directory listing
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.SnapshottableDirectoryListingProto}
   */
  public static final class SnapshottableDirectoryListingProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshottableDirectoryListingProto)
      SnapshottableDirectoryListingProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use SnapshottableDirectoryListingProto.newBuilder() to construct.
    private SnapshottableDirectoryListingProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private SnapshottableDirectoryListingProto() {
      snapshottableDirListing_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new SnapshottableDirectoryListingProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.Builder.class);
    }

    public static final int SNAPSHOTTABLEDIRLISTING_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto> snapshottableDirListing_;
    /**
     * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto> getSnapshottableDirListingList() {
      return snapshottableDirListing_;
    }
    /**
     * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder> 
        getSnapshottableDirListingOrBuilderList() {
      return snapshottableDirListing_;
    }
    /**
     * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
     */
    @java.lang.Override
    public int getSnapshottableDirListingCount() {
      return snapshottableDirListing_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getSnapshottableDirListing(int index) {
      return snapshottableDirListing_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder getSnapshottableDirListingOrBuilder(
        int index) {
      return snapshottableDirListing_.get(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      for (int i = 0; i < getSnapshottableDirListingCount(); i++) {
        if (!getSnapshottableDirListing(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      for (int i = 0; i < snapshottableDirListing_.size(); i++) {
        output.writeMessage(1, snapshottableDirListing_.get(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      for (int i = 0; i < snapshottableDirListing_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, snapshottableDirListing_.get(i));
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto) obj;

      if (!getSnapshottableDirListingList()
          .equals(other.getSnapshottableDirListingList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getSnapshottableDirListingCount() > 0) {
        hash = (37 * hash) + SNAPSHOTTABLEDIRLISTING_FIELD_NUMBER;
        hash = (53 * hash) + getSnapshottableDirListingList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Snapshottable directory listing
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.SnapshottableDirectoryListingProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshottableDirectoryListingProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.newBuilder()
      private Builder() {
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        if (snapshottableDirListingBuilder_ == null) {
          snapshottableDirListing_ = java.util.Collections.emptyList();
        } else {
          snapshottableDirListing_ = null;
          snapshottableDirListingBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto result) {
        if (snapshottableDirListingBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0)) {
            snapshottableDirListing_ = java.util.Collections.unmodifiableList(snapshottableDirListing_);
            bitField0_ = (bitField0_ & ~0x00000001);
          }
          result.snapshottableDirListing_ = snapshottableDirListing_;
        } else {
          result.snapshottableDirListing_ = snapshottableDirListingBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto result) {
        // No non-repeated fields to copy for this message type; the repeated
        // field is handled in buildPartialRepeatedFields above.
        int from_bitField0_ = bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto.getDefaultInstance()) return this;
        if (snapshottableDirListingBuilder_ == null) {
          if (!other.snapshottableDirListing_.isEmpty()) {
            if (snapshottableDirListing_.isEmpty()) {
              snapshottableDirListing_ = other.snapshottableDirListing_;
              bitField0_ = (bitField0_ & ~0x00000001);
            } else {
              ensureSnapshottableDirListingIsMutable();
              snapshottableDirListing_.addAll(other.snapshottableDirListing_);
            }
            onChanged();
          }
        } else {
          if (!other.snapshottableDirListing_.isEmpty()) {
            if (snapshottableDirListingBuilder_.isEmpty()) {
              snapshottableDirListingBuilder_.dispose();
              snapshottableDirListingBuilder_ = null;
              snapshottableDirListing_ = other.snapshottableDirListing_;
              bitField0_ = (bitField0_ & ~0x00000001);
              snapshottableDirListingBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getSnapshottableDirListingFieldBuilder() : null;
            } else {
              snapshottableDirListingBuilder_.addAllMessages(other.snapshottableDirListing_);
            }
          }
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        for (int i = 0; i < getSnapshottableDirListingCount(); i++) {
          if (!getSnapshottableDirListing(i).isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.PARSER,
                        extensionRegistry);
                if (snapshottableDirListingBuilder_ == null) {
                  ensureSnapshottableDirListingIsMutable();
                  snapshottableDirListing_.add(m);
                } else {
                  snapshottableDirListingBuilder_.addMessage(m);
                }
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto> snapshottableDirListing_ =
        java.util.Collections.emptyList();
      private void ensureSnapshottableDirListingIsMutable() {
        if (!((bitField0_ & 0x00000001) != 0)) {
          snapshottableDirListing_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto>(snapshottableDirListing_);
          bitField0_ |= 0x00000001;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder> snapshottableDirListingBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto> getSnapshottableDirListingList() {
        if (snapshottableDirListingBuilder_ == null) {
          return java.util.Collections.unmodifiableList(snapshottableDirListing_);
        } else {
          return snapshottableDirListingBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
       */
      public int getSnapshottableDirListingCount() {
        if (snapshottableDirListingBuilder_ == null) {
          return snapshottableDirListing_.size();
        } else {
          return snapshottableDirListingBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto getSnapshottableDirListing(int index) {
        if (snapshottableDirListingBuilder_ == null) {
          return snapshottableDirListing_.get(index);
        } else {
          return snapshottableDirListingBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
       */
      public Builder setSnapshottableDirListing(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto value) {
        if (snapshottableDirListingBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureSnapshottableDirListingIsMutable();
          snapshottableDirListing_.set(index, value);
          onChanged();
        } else {
          snapshottableDirListingBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
       */
      public Builder setSnapshottableDirListing(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder builderForValue) {
        if (snapshottableDirListingBuilder_ == null) {
          ensureSnapshottableDirListingIsMutable();
          snapshottableDirListing_.set(index, builderForValue.build());
          onChanged();
        } else {
          snapshottableDirListingBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
       */
      public Builder addSnapshottableDirListing(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto value) {
        if (snapshottableDirListingBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureSnapshottableDirListingIsMutable();
          snapshottableDirListing_.add(value);
          onChanged();
        } else {
          snapshottableDirListingBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
       */
      public Builder addSnapshottableDirListing(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto value) {
        if (snapshottableDirListingBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureSnapshottableDirListingIsMutable();
          snapshottableDirListing_.add(index, value);
          onChanged();
        } else {
          snapshottableDirListingBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
       */
      public Builder addSnapshottableDirListing(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder builderForValue) {
        if (snapshottableDirListingBuilder_ == null) {
          ensureSnapshottableDirListingIsMutable();
          snapshottableDirListing_.add(builderForValue.build());
          onChanged();
        } else {
          snapshottableDirListingBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
       */
      public Builder addSnapshottableDirListing(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder builderForValue) {
        if (snapshottableDirListingBuilder_ == null) {
          ensureSnapshottableDirListingIsMutable();
          snapshottableDirListing_.add(index, builderForValue.build());
          onChanged();
        } else {
          snapshottableDirListingBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
       */
      public Builder addAllSnapshottableDirListing(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto> values) {
        if (snapshottableDirListingBuilder_ == null) {
          ensureSnapshottableDirListingIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, snapshottableDirListing_);
          onChanged();
        } else {
          snapshottableDirListingBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
       */
      public Builder clearSnapshottableDirListing() {
        if (snapshottableDirListingBuilder_ == null) {
          snapshottableDirListing_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
        } else {
          snapshottableDirListingBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
       */
      public Builder removeSnapshottableDirListing(int index) {
        if (snapshottableDirListingBuilder_ == null) {
          ensureSnapshottableDirListingIsMutable();
          snapshottableDirListing_.remove(index);
          onChanged();
        } else {
          snapshottableDirListingBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder getSnapshottableDirListingBuilder(
          int index) {
        return getSnapshottableDirListingFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder getSnapshottableDirListingOrBuilder(
          int index) {
        if (snapshottableDirListingBuilder_ == null) {
          return snapshottableDirListing_.get(index);
        } else {
          return snapshottableDirListingBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder> 
           getSnapshottableDirListingOrBuilderList() {
        if (snapshottableDirListingBuilder_ != null) {
          return snapshottableDirListingBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(snapshottableDirListing_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder addSnapshottableDirListingBuilder() {
        return getSnapshottableDirListingFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder addSnapshottableDirListingBuilder(
          int index) {
        return getSnapshottableDirListingFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshottableDirectoryStatusProto snapshottableDirListing = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder> 
           getSnapshottableDirListingBuilderList() {
        return getSnapshottableDirListingFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder> 
          getSnapshottableDirListingFieldBuilder() {
        if (snapshottableDirListingBuilder_ == null) {
          snapshottableDirListingBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProtoOrBuilder>(
                  snapshottableDirListing_,
                  ((bitField0_ & 0x00000001) != 0),
                  getParentForChildren(),
                  isClean());
          snapshottableDirListing_ = null;
        }
        return snapshottableDirListingBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshottableDirectoryListingProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshottableDirectoryListingProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<SnapshottableDirectoryListingProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<SnapshottableDirectoryListingProto>() {
      @java.lang.Override
      public SnapshottableDirectoryListingProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<SnapshottableDirectoryListingProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<SnapshottableDirectoryListingProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
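
  /*
   * Usage sketch (illustrative only; not generated code). The parser surface above also backs the
   * standard delimited-stream helpers, so a listing can be framed onto and re-read from a plain
   * stream. An empty message is built here purely for brevity; real listings carry
   * SnapshottableDirectoryStatusProto entries with their required fields populated.
   * IOException handling is omitted.
   *
   *   java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
   *   // writeDelimitedTo length-prefixes the message, matching parseDelimitedFrom on the way back.
   *   SnapshottableDirectoryListingProto.newBuilder().build().writeDelimitedTo(out);
   *   java.io.ByteArrayInputStream in = new java.io.ByteArrayInputStream(out.toByteArray());
   *   SnapshottableDirectoryListingProto roundTripped =
   *       SnapshottableDirectoryListingProto.parseDelimitedFrom(in);
   */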

  public interface SnapshotListingProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotListingProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto> 
        getSnapshotListingList();
    /**
     * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto getSnapshotListing(int index);
    /**
     * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
     */
    int getSnapshotListingCount();
    /**
     * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProtoOrBuilder> 
        getSnapshotListingOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProtoOrBuilder getSnapshotListingOrBuilder(
        int index);
  }
  /**
   * <pre>
   **
   * Snapshot listing
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.SnapshotListingProto}
   */
  public static final class SnapshotListingProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotListingProto)
      SnapshotListingProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use SnapshotListingProto.newBuilder() to construct.
    private SnapshotListingProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private SnapshotListingProto() {
      snapshotListing_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new SnapshotListingProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotListingProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotListingProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto.Builder.class);
    }

    public static final int SNAPSHOTLISTING_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto> snapshotListing_;
    /**
     * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto> getSnapshotListingList() {
      return snapshotListing_;
    }
    /**
     * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProtoOrBuilder> 
        getSnapshotListingOrBuilderList() {
      return snapshotListing_;
    }
    /**
     * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
     */
    @java.lang.Override
    public int getSnapshotListingCount() {
      return snapshotListing_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto getSnapshotListing(int index) {
      return snapshotListing_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProtoOrBuilder getSnapshotListingOrBuilder(
        int index) {
      return snapshotListing_.get(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      for (int i = 0; i < getSnapshotListingCount(); i++) {
        if (!getSnapshotListing(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      for (int i = 0; i < snapshotListing_.size(); i++) {
        output.writeMessage(1, snapshotListing_.get(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      for (int i = 0; i < snapshotListing_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, snapshotListing_.get(i));
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto) obj;

      if (!getSnapshotListingList()
          .equals(other.getSnapshotListingList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getSnapshotListingCount() > 0) {
        hash = (37 * hash) + SNAPSHOTLISTING_FIELD_NUMBER;
        hash = (53 * hash) + getSnapshotListingList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Snapshot listing
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.SnapshotListingProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotListingProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotListingProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotListingProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        if (snapshotListingBuilder_ == null) {
          snapshotListing_ = java.util.Collections.emptyList();
        } else {
          snapshotListing_ = null;
          snapshotListingBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotListingProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto result) {
        if (snapshotListingBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0)) {
            snapshotListing_ = java.util.Collections.unmodifiableList(snapshotListing_);
            bitField0_ = (bitField0_ & ~0x00000001);
          }
          result.snapshotListing_ = snapshotListing_;
        } else {
          result.snapshotListing_ = snapshotListingBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto result) {
        int from_bitField0_ = bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto.getDefaultInstance()) return this;
        if (snapshotListingBuilder_ == null) {
          if (!other.snapshotListing_.isEmpty()) {
            if (snapshotListing_.isEmpty()) {
              snapshotListing_ = other.snapshotListing_;
              bitField0_ = (bitField0_ & ~0x00000001);
            } else {
              ensureSnapshotListingIsMutable();
              snapshotListing_.addAll(other.snapshotListing_);
            }
            onChanged();
          }
        } else {
          if (!other.snapshotListing_.isEmpty()) {
            if (snapshotListingBuilder_.isEmpty()) {
              snapshotListingBuilder_.dispose();
              snapshotListingBuilder_ = null;
              snapshotListing_ = other.snapshotListing_;
              bitField0_ = (bitField0_ & ~0x00000001);
              snapshotListingBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getSnapshotListingFieldBuilder() : null;
            } else {
              snapshotListingBuilder_.addAllMessages(other.snapshotListing_);
            }
          }
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        for (int i = 0; i < getSnapshotListingCount(); i++) {
          if (!getSnapshotListing(i).isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.PARSER,
                        extensionRegistry);
                if (snapshotListingBuilder_ == null) {
                  ensureSnapshotListingIsMutable();
                  snapshotListing_.add(m);
                } else {
                  snapshotListingBuilder_.addMessage(m);
                }
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto> snapshotListing_ =
        java.util.Collections.emptyList();
      private void ensureSnapshotListingIsMutable() {
        if (!((bitField0_ & 0x00000001) != 0)) {
          snapshotListing_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto>(snapshotListing_);
          bitField0_ |= 0x00000001;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProtoOrBuilder> snapshotListingBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto> getSnapshotListingList() {
        if (snapshotListingBuilder_ == null) {
          return java.util.Collections.unmodifiableList(snapshotListing_);
        } else {
          return snapshotListingBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
       */
      public int getSnapshotListingCount() {
        if (snapshotListingBuilder_ == null) {
          return snapshotListing_.size();
        } else {
          return snapshotListingBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto getSnapshotListing(int index) {
        if (snapshotListingBuilder_ == null) {
          return snapshotListing_.get(index);
        } else {
          return snapshotListingBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
       */
      public Builder setSnapshotListing(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto value) {
        if (snapshotListingBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureSnapshotListingIsMutable();
          snapshotListing_.set(index, value);
          onChanged();
        } else {
          snapshotListingBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
       */
      public Builder setSnapshotListing(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder builderForValue) {
        if (snapshotListingBuilder_ == null) {
          ensureSnapshotListingIsMutable();
          snapshotListing_.set(index, builderForValue.build());
          onChanged();
        } else {
          snapshotListingBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
       */
      public Builder addSnapshotListing(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto value) {
        if (snapshotListingBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureSnapshotListingIsMutable();
          snapshotListing_.add(value);
          onChanged();
        } else {
          snapshotListingBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
       */
      public Builder addSnapshotListing(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto value) {
        if (snapshotListingBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureSnapshotListingIsMutable();
          snapshotListing_.add(index, value);
          onChanged();
        } else {
          snapshotListingBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
       */
      public Builder addSnapshotListing(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder builderForValue) {
        if (snapshotListingBuilder_ == null) {
          ensureSnapshotListingIsMutable();
          snapshotListing_.add(builderForValue.build());
          onChanged();
        } else {
          snapshotListingBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
       */
      public Builder addSnapshotListing(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder builderForValue) {
        if (snapshotListingBuilder_ == null) {
          ensureSnapshotListingIsMutable();
          snapshotListing_.add(index, builderForValue.build());
          onChanged();
        } else {
          snapshotListingBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
       */
      public Builder addAllSnapshotListing(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto> values) {
        if (snapshotListingBuilder_ == null) {
          ensureSnapshotListingIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, snapshotListing_);
          onChanged();
        } else {
          snapshotListingBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
       */
      public Builder clearSnapshotListing() {
        if (snapshotListingBuilder_ == null) {
          snapshotListing_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
        } else {
          snapshotListingBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
       */
      public Builder removeSnapshotListing(int index) {
        if (snapshotListingBuilder_ == null) {
          ensureSnapshotListingIsMutable();
          snapshotListing_.remove(index);
          onChanged();
        } else {
          snapshotListingBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder getSnapshotListingBuilder(
          int index) {
        return getSnapshotListingFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProtoOrBuilder getSnapshotListingOrBuilder(
          int index) {
        if (snapshotListingBuilder_ == null) {
          return snapshotListing_.get(index);
        } else {
          return snapshotListingBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProtoOrBuilder> 
           getSnapshotListingOrBuilderList() {
        if (snapshotListingBuilder_ != null) {
          return snapshotListingBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(snapshotListing_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder addSnapshotListingBuilder() {
        return getSnapshotListingFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder addSnapshotListingBuilder(
          int index) {
        return getSnapshotListingFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotStatusProto snapshotListing = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder> 
           getSnapshotListingBuilderList() {
        return getSnapshotListingFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProtoOrBuilder> 
          getSnapshotListingFieldBuilder() {
        if (snapshotListingBuilder_ == null) {
          snapshotListingBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotStatusProtoOrBuilder>(
                  snapshotListing_,
                  ((bitField0_ & 0x00000001) != 0),
                  getParentForChildren(),
                  isClean());
          snapshotListing_ = null;
        }
        return snapshotListingBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotListingProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotListingProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotListingProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<SnapshotListingProto>() {
      @java.lang.Override
      public SnapshotListingProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotListingProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotListingProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotListingProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
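
  /*
   * Usage sketch (illustrative only; not generated code): a minimal builder/parser round trip for
   * SnapshotListingProto. The listing is left empty because SnapshotStatusProto entries have required
   * fields of their own; with fully populated entries, addSnapshotListing(...) or
   * addAllSnapshotListing(...) would be called before build(). InvalidProtocolBufferException
   * handling is omitted.
   *
   *   SnapshotListingProto empty = SnapshotListingProto.newBuilder().build(); // no entries, so isInitialized() holds
   *   byte[] bytes = empty.toByteArray();
   *   SnapshotListingProto parsed = SnapshotListingProto.parseFrom(bytes);
   *   assert parsed.getSnapshotListingCount() == 0;
   */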

  public interface SnapshotDiffReportEntryProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotDiffReportEntryProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required bytes fullpath = 1;</code>
     * @return Whether the fullpath field is set.
     */
    boolean hasFullpath();
    /**
     * <code>required bytes fullpath = 1;</code>
     * @return The fullpath.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getFullpath();

    /**
     * <code>required string modificationLabel = 2;</code>
     * @return Whether the modificationLabel field is set.
     */
    boolean hasModificationLabel();
    /**
     * <code>required string modificationLabel = 2;</code>
     * @return The modificationLabel.
     */
    java.lang.String getModificationLabel();
    /**
     * <code>required string modificationLabel = 2;</code>
     * @return The bytes for modificationLabel.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getModificationLabelBytes();

    /**
     * <code>optional bytes targetPath = 3;</code>
     * @return Whether the targetPath field is set.
     */
    boolean hasTargetPath();
    /**
     * <code>optional bytes targetPath = 3;</code>
     * @return The targetPath.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getTargetPath();
  }
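
  /*
   * Usage sketch (illustrative only; not generated code): one diff-report entry with the two required
   * fields set. Setter names follow the standard generated pattern (setFullpath appears below;
   * setModificationLabel is assumed from the same pattern). targetPath is optional and left unset
   * here. The label value "M" is a placeholder chosen for the example, not a value mandated by this
   * class.
   *
   *   SnapshotDiffReportEntryProto entry = SnapshotDiffReportEntryProto.newBuilder()
   *       .setFullpath(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("dir/file1"))
   *       .setModificationLabel("M")
   *       .build(); // build() would throw if either required field were missing
   *   boolean isRename = entry.hasTargetPath(); // false in this sketch
   */
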
  /**
   * <pre>
   **
   * Snapshot diff report entry
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportEntryProto}
   */
  public static final class SnapshotDiffReportEntryProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotDiffReportEntryProto)
      SnapshotDiffReportEntryProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use SnapshotDiffReportEntryProto.newBuilder() to construct.
    private SnapshotDiffReportEntryProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private SnapshotDiffReportEntryProto() {
      fullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      modificationLabel_ = "";
      targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new SnapshotDiffReportEntryProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder.class);
    }

    private int bitField0_;
    public static final int FULLPATH_FIELD_NUMBER = 1;
    private org.apache.hadoop.thirdparty.protobuf.ByteString fullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>required bytes fullpath = 1;</code>
     * @return Whether the fullpath field is set.
     */
    @java.lang.Override
    public boolean hasFullpath() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required bytes fullpath = 1;</code>
     * @return The fullpath.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getFullpath() {
      return fullpath_;
    }

    public static final int MODIFICATIONLABEL_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object modificationLabel_ = "";
    /**
     * <code>required string modificationLabel = 2;</code>
     * @return Whether the modificationLabel field is set.
     */
    @java.lang.Override
    public boolean hasModificationLabel() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required string modificationLabel = 2;</code>
     * @return The modificationLabel.
     */
    @java.lang.Override
    public java.lang.String getModificationLabel() {
      java.lang.Object ref = modificationLabel_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          modificationLabel_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string modificationLabel = 2;</code>
     * @return The bytes for modificationLabel.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getModificationLabelBytes() {
      java.lang.Object ref = modificationLabel_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        modificationLabel_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int TARGETPATH_FIELD_NUMBER = 3;
    private org.apache.hadoop.thirdparty.protobuf.ByteString targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>optional bytes targetPath = 3;</code>
     * @return Whether the targetPath field is set.
     */
    @java.lang.Override
    public boolean hasTargetPath() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional bytes targetPath = 3;</code>
     * @return The targetPath.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getTargetPath() {
      return targetPath_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasFullpath()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasModificationLabel()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeBytes(1, fullpath_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, modificationLabel_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeBytes(3, targetPath_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(1, fullpath_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, modificationLabel_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(3, targetPath_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto) obj;

      if (hasFullpath() != other.hasFullpath()) return false;
      if (hasFullpath()) {
        if (!getFullpath()
            .equals(other.getFullpath())) return false;
      }
      if (hasModificationLabel() != other.hasModificationLabel()) return false;
      if (hasModificationLabel()) {
        if (!getModificationLabel()
            .equals(other.getModificationLabel())) return false;
      }
      if (hasTargetPath() != other.hasTargetPath()) return false;
      if (hasTargetPath()) {
        if (!getTargetPath()
            .equals(other.getTargetPath())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasFullpath()) {
        hash = (37 * hash) + FULLPATH_FIELD_NUMBER;
        hash = (53 * hash) + getFullpath().hashCode();
      }
      if (hasModificationLabel()) {
        hash = (37 * hash) + MODIFICATIONLABEL_FIELD_NUMBER;
        hash = (53 * hash) + getModificationLabel().hashCode();
      }
      if (hasTargetPath()) {
        hash = (37 * hash) + TARGETPATH_FIELD_NUMBER;
        hash = (53 * hash) + getTargetPath().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Snapshot diff report entry
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportEntryProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotDiffReportEntryProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        fullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        modificationLabel_ = "";
        targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.fullpath_ = fullpath_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.modificationLabel_ = modificationLabel_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.targetPath_ = targetPath_;
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.getDefaultInstance()) return this;
        if (other.hasFullpath()) {
          setFullpath(other.getFullpath());
        }
        if (other.hasModificationLabel()) {
          modificationLabel_ = other.modificationLabel_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        if (other.hasTargetPath()) {
          setTargetPath(other.getTargetPath());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasFullpath()) {
          return false;
        }
        if (!hasModificationLabel()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                fullpath_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                modificationLabel_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 26: {
                targetPath_ = input.readBytes();
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.thirdparty.protobuf.ByteString fullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>required bytes fullpath = 1;</code>
       * @return Whether the fullpath field is set.
       */
      @java.lang.Override
      public boolean hasFullpath() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required bytes fullpath = 1;</code>
       * @return The fullpath.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getFullpath() {
        return fullpath_;
      }
      /**
       * <code>required bytes fullpath = 1;</code>
       * @param value The fullpath to set.
       * @return This builder for chaining.
       */
      public Builder setFullpath(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        fullpath_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required bytes fullpath = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearFullpath() {
        bitField0_ = (bitField0_ & ~0x00000001);
        fullpath_ = getDefaultInstance().getFullpath();
        onChanged();
        return this;
      }

      private java.lang.Object modificationLabel_ = "";
      /**
       * <code>required string modificationLabel = 2;</code>
       * @return Whether the modificationLabel field is set.
       */
      public boolean hasModificationLabel() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required string modificationLabel = 2;</code>
       * @return The modificationLabel.
       */
      public java.lang.String getModificationLabel() {
        java.lang.Object ref = modificationLabel_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            modificationLabel_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string modificationLabel = 2;</code>
       * @return The bytes for modificationLabel.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getModificationLabelBytes() {
        java.lang.Object ref = modificationLabel_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          modificationLabel_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string modificationLabel = 2;</code>
       * @param value The modificationLabel to set.
       * @return This builder for chaining.
       */
      public Builder setModificationLabel(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        modificationLabel_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required string modificationLabel = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearModificationLabel() {
        modificationLabel_ = getDefaultInstance().getModificationLabel();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <code>required string modificationLabel = 2;</code>
       * @param value The bytes for modificationLabel to set.
       * @return This builder for chaining.
       */
      public Builder setModificationLabelBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        modificationLabel_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>optional bytes targetPath = 3;</code>
       * @return Whether the targetPath field is set.
       */
      @java.lang.Override
      public boolean hasTargetPath() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional bytes targetPath = 3;</code>
       * @return The targetPath.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getTargetPath() {
        return targetPath_;
      }
      /**
       * <code>optional bytes targetPath = 3;</code>
       * @param value The targetPath to set.
       * @return This builder for chaining.
       */
      public Builder setTargetPath(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        targetPath_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional bytes targetPath = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearTargetPath() {
        bitField0_ = (bitField0_ & ~0x00000004);
        targetPath_ = getDefaultInstance().getTargetPath();
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotDiffReportEntryProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotDiffReportEntryProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotDiffReportEntryProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<SnapshotDiffReportEntryProto>() {
      @java.lang.Override
      public SnapshotDiffReportEntryProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotDiffReportEntryProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotDiffReportEntryProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
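
  /*
   * A minimal usage sketch (illustrative only, not part of the generated API):
   * populating one diff-report entry through the builder methods above. The
   * paths and label are hypothetical example values.
   *
   *   HdfsProtos.SnapshotDiffReportEntryProto entry =
   *       HdfsProtos.SnapshotDiffReportEntryProto.newBuilder()
   *           .setFullpath(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("dir/file1"))
   *           .setModificationLabel("M")   // required; e.g. "M" for a modified path
   *           .setTargetPath(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("dir/file2"))  // optional rename target
   *           .build();                    // throws if a required field is unset
   */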

  public interface SnapshotDiffReportProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotDiffReportProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * full path of the directory where snapshots were taken
     * </pre>
     *
     * <code>required string snapshotRoot = 1;</code>
     * @return Whether the snapshotRoot field is set.
     */
    boolean hasSnapshotRoot();
    /**
     * <pre>
     * full path of the directory where snapshots were taken
     * </pre>
     *
     * <code>required string snapshotRoot = 1;</code>
     * @return The snapshotRoot.
     */
    java.lang.String getSnapshotRoot();
    /**
     * <pre>
     * full path of the directory where snapshots were taken
     * </pre>
     *
     * <code>required string snapshotRoot = 1;</code>
     * @return The bytes for snapshotRoot.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getSnapshotRootBytes();

    /**
     * <code>required string fromSnapshot = 2;</code>
     * @return Whether the fromSnapshot field is set.
     */
    boolean hasFromSnapshot();
    /**
     * <code>required string fromSnapshot = 2;</code>
     * @return The fromSnapshot.
     */
    java.lang.String getFromSnapshot();
    /**
     * <code>required string fromSnapshot = 2;</code>
     * @return The bytes for fromSnapshot.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getFromSnapshotBytes();

    /**
     * <code>required string toSnapshot = 3;</code>
     * @return Whether the toSnapshot field is set.
     */
    boolean hasToSnapshot();
    /**
     * <code>required string toSnapshot = 3;</code>
     * @return The toSnapshot.
     */
    java.lang.String getToSnapshot();
    /**
     * <code>required string toSnapshot = 3;</code>
     * @return The bytes for toSnapshot.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getToSnapshotBytes();

    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto> 
        getDiffReportEntriesList();
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDiffReportEntries(int index);
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
     */
    int getDiffReportEntriesCount();
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder> 
        getDiffReportEntriesOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder getDiffReportEntriesOrBuilder(
        int index);
  }
  /**
   * <pre>
   **
   * Snapshot diff report
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportProto}
   */
  public static final class SnapshotDiffReportProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotDiffReportProto)
      SnapshotDiffReportProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use SnapshotDiffReportProto.newBuilder() to construct.
    private SnapshotDiffReportProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private SnapshotDiffReportProto() {
      snapshotRoot_ = "";
      fromSnapshot_ = "";
      toSnapshot_ = "";
      diffReportEntries_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new SnapshotDiffReportProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.Builder.class);
    }

    private int bitField0_;
    public static final int SNAPSHOTROOT_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object snapshotRoot_ = "";
    /**
     * <pre>
     * full path of the directory where snapshots were taken
     * </pre>
     *
     * <code>required string snapshotRoot = 1;</code>
     * @return Whether the snapshotRoot field is set.
     */
    @java.lang.Override
    public boolean hasSnapshotRoot() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <pre>
     * full path of the directory where snapshots were taken
     * </pre>
     *
     * <code>required string snapshotRoot = 1;</code>
     * @return The snapshotRoot.
     */
    @java.lang.Override
    public java.lang.String getSnapshotRoot() {
      java.lang.Object ref = snapshotRoot_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          snapshotRoot_ = s;
        }
        return s;
      }
    }
    /**
     * <pre>
     * full path of the directory where snapshots were taken
     * </pre>
     *
     * <code>required string snapshotRoot = 1;</code>
     * @return The bytes for snapshotRoot.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getSnapshotRootBytes() {
      java.lang.Object ref = snapshotRoot_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        snapshotRoot_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int FROMSNAPSHOT_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object fromSnapshot_ = "";
    /**
     * <code>required string fromSnapshot = 2;</code>
     * @return Whether the fromSnapshot field is set.
     */
    @java.lang.Override
    public boolean hasFromSnapshot() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required string fromSnapshot = 2;</code>
     * @return The fromSnapshot.
     */
    @java.lang.Override
    public java.lang.String getFromSnapshot() {
      java.lang.Object ref = fromSnapshot_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          fromSnapshot_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string fromSnapshot = 2;</code>
     * @return The bytes for fromSnapshot.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getFromSnapshotBytes() {
      java.lang.Object ref = fromSnapshot_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        fromSnapshot_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int TOSNAPSHOT_FIELD_NUMBER = 3;
    @SuppressWarnings("serial")
    private volatile java.lang.Object toSnapshot_ = "";
    /**
     * <code>required string toSnapshot = 3;</code>
     * @return Whether the toSnapshot field is set.
     */
    @java.lang.Override
    public boolean hasToSnapshot() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required string toSnapshot = 3;</code>
     * @return The toSnapshot.
     */
    @java.lang.Override
    public java.lang.String getToSnapshot() {
      java.lang.Object ref = toSnapshot_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          toSnapshot_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string toSnapshot = 3;</code>
     * @return The bytes for toSnapshot.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getToSnapshotBytes() {
      java.lang.Object ref = toSnapshot_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        toSnapshot_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int DIFFREPORTENTRIES_FIELD_NUMBER = 4;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto> diffReportEntries_;
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto> getDiffReportEntriesList() {
      return diffReportEntries_;
    }
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder> 
        getDiffReportEntriesOrBuilderList() {
      return diffReportEntries_;
    }
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
     */
    @java.lang.Override
    public int getDiffReportEntriesCount() {
      return diffReportEntries_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDiffReportEntries(int index) {
      return diffReportEntries_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder getDiffReportEntriesOrBuilder(
        int index) {
      return diffReportEntries_.get(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasSnapshotRoot()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasFromSnapshot()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasToSnapshot()) {
        memoizedIsInitialized = 0;
        return false;
      }
      for (int i = 0; i < getDiffReportEntriesCount(); i++) {
        if (!getDiffReportEntries(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, snapshotRoot_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, fromSnapshot_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, toSnapshot_);
      }
      for (int i = 0; i < diffReportEntries_.size(); i++) {
        output.writeMessage(4, diffReportEntries_.get(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, snapshotRoot_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, fromSnapshot_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, toSnapshot_);
      }
      for (int i = 0; i < diffReportEntries_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(4, diffReportEntries_.get(i));
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto) obj;

      if (hasSnapshotRoot() != other.hasSnapshotRoot()) return false;
      if (hasSnapshotRoot()) {
        if (!getSnapshotRoot()
            .equals(other.getSnapshotRoot())) return false;
      }
      if (hasFromSnapshot() != other.hasFromSnapshot()) return false;
      if (hasFromSnapshot()) {
        if (!getFromSnapshot()
            .equals(other.getFromSnapshot())) return false;
      }
      if (hasToSnapshot() != other.hasToSnapshot()) return false;
      if (hasToSnapshot()) {
        if (!getToSnapshot()
            .equals(other.getToSnapshot())) return false;
      }
      if (!getDiffReportEntriesList()
          .equals(other.getDiffReportEntriesList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasSnapshotRoot()) {
        hash = (37 * hash) + SNAPSHOTROOT_FIELD_NUMBER;
        hash = (53 * hash) + getSnapshotRoot().hashCode();
      }
      if (hasFromSnapshot()) {
        hash = (37 * hash) + FROMSNAPSHOT_FIELD_NUMBER;
        hash = (53 * hash) + getFromSnapshot().hashCode();
      }
      if (hasToSnapshot()) {
        hash = (37 * hash) + TOSNAPSHOT_FIELD_NUMBER;
        hash = (53 * hash) + getToSnapshot().hashCode();
      }
      if (getDiffReportEntriesCount() > 0) {
        hash = (37 * hash) + DIFFREPORTENTRIES_FIELD_NUMBER;
        hash = (53 * hash) + getDiffReportEntriesList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
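
    /*
     * A minimal round-trip sketch (illustrative only): the parseFrom overloads
     * above rebuild a report from its wire form. 'report' is assumed to be an
     * already-built SnapshotDiffReportProto.
     *
     *   byte[] wire = report.toByteArray();
     *   HdfsProtos.SnapshotDiffReportProto parsed =
     *       HdfsProtos.SnapshotDiffReportProto.parseFrom(wire);
     *   // parseFrom throws InvalidProtocolBufferException on malformed or
     *   // incomplete (missing required field) input.
     */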

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Snapshot diff report
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotDiffReportProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        snapshotRoot_ = "";
        fromSnapshot_ = "";
        toSnapshot_ = "";
        if (diffReportEntriesBuilder_ == null) {
          diffReportEntries_ = java.util.Collections.emptyList();
        } else {
          diffReportEntries_ = null;
          diffReportEntriesBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000008);
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
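
      /*
       * A minimal usage sketch (illustrative only): assembling a report for two
       * hypothetical snapshots of "/data", using only builder methods declared
       * in this class. 'entry' is assumed to be an already-built
       * SnapshotDiffReportEntryProto.
       *
       *   HdfsProtos.SnapshotDiffReportProto report =
       *       HdfsProtos.SnapshotDiffReportProto.newBuilder()
       *           .setSnapshotRoot("/data")     // required
       *           .setFromSnapshot("s1")        // required
       *           .setToSnapshot("s2")          // required
       *           .addDiffReportEntries(entry)  // repeated
       *           .build();                     // throws if a required field is unset
       */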

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto result) {
        if (diffReportEntriesBuilder_ == null) {
          if (((bitField0_ & 0x00000008) != 0)) {
            diffReportEntries_ = java.util.Collections.unmodifiableList(diffReportEntries_);
            bitField0_ = (bitField0_ & ~0x00000008);
          }
          result.diffReportEntries_ = diffReportEntries_;
        } else {
          result.diffReportEntries_ = diffReportEntriesBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.snapshotRoot_ = snapshotRoot_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.fromSnapshot_ = fromSnapshot_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.toSnapshot_ = toSnapshot_;
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto.getDefaultInstance()) return this;
        if (other.hasSnapshotRoot()) {
          snapshotRoot_ = other.snapshotRoot_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasFromSnapshot()) {
          fromSnapshot_ = other.fromSnapshot_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        if (other.hasToSnapshot()) {
          toSnapshot_ = other.toSnapshot_;
          bitField0_ |= 0x00000004;
          onChanged();
        }
        if (diffReportEntriesBuilder_ == null) {
          if (!other.diffReportEntries_.isEmpty()) {
            if (diffReportEntries_.isEmpty()) {
              diffReportEntries_ = other.diffReportEntries_;
              bitField0_ = (bitField0_ & ~0x00000008);
            } else {
              ensureDiffReportEntriesIsMutable();
              diffReportEntries_.addAll(other.diffReportEntries_);
            }
            onChanged();
          }
        } else {
          if (!other.diffReportEntries_.isEmpty()) {
            if (diffReportEntriesBuilder_.isEmpty()) {
              diffReportEntriesBuilder_.dispose();
              diffReportEntriesBuilder_ = null;
              diffReportEntries_ = other.diffReportEntries_;
              bitField0_ = (bitField0_ & ~0x00000008);
              diffReportEntriesBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getDiffReportEntriesFieldBuilder() : null;
            } else {
              diffReportEntriesBuilder_.addAllMessages(other.diffReportEntries_);
            }
          }
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasSnapshotRoot()) {
          return false;
        }
        if (!hasFromSnapshot()) {
          return false;
        }
        if (!hasToSnapshot()) {
          return false;
        }
        for (int i = 0; i < getDiffReportEntriesCount(); i++) {
          if (!getDiffReportEntries(i).isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                snapshotRoot_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                fromSnapshot_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 26: {
                toSnapshot_ = input.readBytes();
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              case 34: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.PARSER,
                        extensionRegistry);
                if (diffReportEntriesBuilder_ == null) {
                  ensureDiffReportEntriesIsMutable();
                  diffReportEntries_.add(m);
                } else {
                  diffReportEntriesBuilder_.addMessage(m);
                }
                break;
              } // case 34
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object snapshotRoot_ = "";
      /**
       * <pre>
       * full path of the directory where snapshots were taken
       * </pre>
       *
       * <code>required string snapshotRoot = 1;</code>
       * @return Whether the snapshotRoot field is set.
       */
      public boolean hasSnapshotRoot() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <pre>
       * full path of the directory where snapshots were taken
       * </pre>
       *
       * <code>required string snapshotRoot = 1;</code>
       * @return The snapshotRoot.
       */
      public java.lang.String getSnapshotRoot() {
        java.lang.Object ref = snapshotRoot_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            snapshotRoot_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <pre>
       * full path of the directory where snapshots were taken
       * </pre>
       *
       * <code>required string snapshotRoot = 1;</code>
       * @return The bytes for snapshotRoot.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getSnapshotRootBytes() {
        java.lang.Object ref = snapshotRoot_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          snapshotRoot_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <pre>
       * full path of the directory where snapshots were taken
       * </pre>
       *
       * <code>required string snapshotRoot = 1;</code>
       * @param value The snapshotRoot to set.
       * @return This builder for chaining.
       */
      public Builder setSnapshotRoot(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        snapshotRoot_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * full path of the directory where snapshots were taken
       * </pre>
       *
       * <code>required string snapshotRoot = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearSnapshotRoot() {
        snapshotRoot_ = getDefaultInstance().getSnapshotRoot();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <pre>
       * full path of the directory where snapshots were taken
       * </pre>
       *
       * <code>required string snapshotRoot = 1;</code>
       * @param value The bytes for snapshotRoot to set.
       * @return This builder for chaining.
       */
      public Builder setSnapshotRootBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        snapshotRoot_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private java.lang.Object fromSnapshot_ = "";
      /**
       * <code>required string fromSnapshot = 2;</code>
       * @return Whether the fromSnapshot field is set.
       */
      public boolean hasFromSnapshot() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required string fromSnapshot = 2;</code>
       * @return The fromSnapshot.
       */
      public java.lang.String getFromSnapshot() {
        java.lang.Object ref = fromSnapshot_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            fromSnapshot_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string fromSnapshot = 2;</code>
       * @return The bytes for fromSnapshot.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getFromSnapshotBytes() {
        java.lang.Object ref = fromSnapshot_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          fromSnapshot_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string fromSnapshot = 2;</code>
       * @param value The fromSnapshot to set.
       * @return This builder for chaining.
       */
      public Builder setFromSnapshot(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        fromSnapshot_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required string fromSnapshot = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearFromSnapshot() {
        fromSnapshot_ = getDefaultInstance().getFromSnapshot();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <code>required string fromSnapshot = 2;</code>
       * @param value The bytes for fromSnapshot to set.
       * @return This builder for chaining.
       */
      public Builder setFromSnapshotBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        fromSnapshot_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }

      private java.lang.Object toSnapshot_ = "";
      /**
       * <code>required string toSnapshot = 3;</code>
       * @return Whether the toSnapshot field is set.
       */
      public boolean hasToSnapshot() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required string toSnapshot = 3;</code>
       * @return The toSnapshot.
       */
      public java.lang.String getToSnapshot() {
        java.lang.Object ref = toSnapshot_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            toSnapshot_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string toSnapshot = 3;</code>
       * @return The bytes for toSnapshot.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getToSnapshotBytes() {
        java.lang.Object ref = toSnapshot_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          toSnapshot_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string toSnapshot = 3;</code>
       * @param value The toSnapshot to set.
       * @return This builder for chaining.
       */
      public Builder setToSnapshot(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        toSnapshot_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required string toSnapshot = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearToSnapshot() {
        toSnapshot_ = getDefaultInstance().getToSnapshot();
        bitField0_ = (bitField0_ & ~0x00000004);
        onChanged();
        return this;
      }
      /**
       * <code>required string toSnapshot = 3;</code>
       * @param value The bytes for toSnapshot to set.
       * @return This builder for chaining.
       */
      public Builder setToSnapshotBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        toSnapshot_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto> diffReportEntries_ =
        java.util.Collections.emptyList();
      private void ensureDiffReportEntriesIsMutable() {
        if (!((bitField0_ & 0x00000008) != 0)) {
          diffReportEntries_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto>(diffReportEntries_);
          bitField0_ |= 0x00000008;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder> diffReportEntriesBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto> getDiffReportEntriesList() {
        if (diffReportEntriesBuilder_ == null) {
          return java.util.Collections.unmodifiableList(diffReportEntries_);
        } else {
          return diffReportEntriesBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
       */
      public int getDiffReportEntriesCount() {
        if (diffReportEntriesBuilder_ == null) {
          return diffReportEntries_.size();
        } else {
          return diffReportEntriesBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto getDiffReportEntries(int index) {
        if (diffReportEntriesBuilder_ == null) {
          return diffReportEntries_.get(index);
        } else {
          return diffReportEntriesBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
       */
      public Builder setDiffReportEntries(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto value) {
        if (diffReportEntriesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureDiffReportEntriesIsMutable();
          diffReportEntries_.set(index, value);
          onChanged();
        } else {
          diffReportEntriesBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
       */
      public Builder setDiffReportEntries(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder builderForValue) {
        if (diffReportEntriesBuilder_ == null) {
          ensureDiffReportEntriesIsMutable();
          diffReportEntries_.set(index, builderForValue.build());
          onChanged();
        } else {
          diffReportEntriesBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
       */
      public Builder addDiffReportEntries(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto value) {
        if (diffReportEntriesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureDiffReportEntriesIsMutable();
          diffReportEntries_.add(value);
          onChanged();
        } else {
          diffReportEntriesBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
       */
      public Builder addDiffReportEntries(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto value) {
        if (diffReportEntriesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureDiffReportEntriesIsMutable();
          diffReportEntries_.add(index, value);
          onChanged();
        } else {
          diffReportEntriesBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
       */
      public Builder addDiffReportEntries(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder builderForValue) {
        if (diffReportEntriesBuilder_ == null) {
          ensureDiffReportEntriesIsMutable();
          diffReportEntries_.add(builderForValue.build());
          onChanged();
        } else {
          diffReportEntriesBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
       */
      public Builder addDiffReportEntries(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder builderForValue) {
        if (diffReportEntriesBuilder_ == null) {
          ensureDiffReportEntriesIsMutable();
          diffReportEntries_.add(index, builderForValue.build());
          onChanged();
        } else {
          diffReportEntriesBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
       */
      public Builder addAllDiffReportEntries(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto> values) {
        if (diffReportEntriesBuilder_ == null) {
          ensureDiffReportEntriesIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, diffReportEntries_);
          onChanged();
        } else {
          diffReportEntriesBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
       */
      public Builder clearDiffReportEntries() {
        if (diffReportEntriesBuilder_ == null) {
          diffReportEntries_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000008);
          onChanged();
        } else {
          diffReportEntriesBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
       */
      public Builder removeDiffReportEntries(int index) {
        if (diffReportEntriesBuilder_ == null) {
          ensureDiffReportEntriesIsMutable();
          diffReportEntries_.remove(index);
          onChanged();
        } else {
          diffReportEntriesBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder getDiffReportEntriesBuilder(
          int index) {
        return getDiffReportEntriesFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder getDiffReportEntriesOrBuilder(
          int index) {
        if (diffReportEntriesBuilder_ == null) {
          return diffReportEntries_.get(index);
        } else {
          return diffReportEntriesBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder> 
           getDiffReportEntriesOrBuilderList() {
        if (diffReportEntriesBuilder_ != null) {
          return diffReportEntriesBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(diffReportEntries_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder addDiffReportEntriesBuilder() {
        return getDiffReportEntriesFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder addDiffReportEntriesBuilder(
          int index) {
        return getDiffReportEntriesFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportEntryProto diffReportEntries = 4;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder> 
           getDiffReportEntriesBuilderList() {
        return getDiffReportEntriesFieldBuilder().getBuilderList();
      }
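      // The helper below lazily constructs the RepeatedFieldBuilderV3 delegate on first use:
      // it adopts the current diffReportEntries_ list (which is then nulled out), and from that
      // point the set/add/remove/clear methods above operate through the delegate rather than
      // mutating the list directly.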
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder> 
          getDiffReportEntriesFieldBuilder() {
        if (diffReportEntriesBuilder_ == null) {
          diffReportEntriesBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProtoOrBuilder>(
                  diffReportEntries_,
                  ((bitField0_ & 0x00000008) != 0),
                  getParentForChildren(),
                  isClean());
          diffReportEntries_ = null;
        }
        return diffReportEntriesBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotDiffReportProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotDiffReportProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotDiffReportProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<SnapshotDiffReportProto>() {
      @java.lang.Override
      public SnapshotDiffReportProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotDiffReportProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotDiffReportProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
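
  // Illustrative usage sketch (hand-written, not produced by protoc): one way the
  // diffReportEntries builder methods above might be exercised. "entries" is a hypothetical,
  // pre-built java.util.List<HdfsProtos.SnapshotDiffReportEntryProto>; buildPartial() is used
  // because the message may have additional required fields outside this excerpt.
  //
  //   HdfsProtos.SnapshotDiffReportProto.Builder reportBuilder =
  //       HdfsProtos.SnapshotDiffReportProto.newBuilder();
  //   reportBuilder.addAllDiffReportEntries(entries);      // bulk-append pre-built entries
  //   HdfsProtos.SnapshotDiffReportEntryProto.Builder entryBuilder =
  //       reportBuilder.addDiffReportEntriesBuilder();     // or populate one entry in place
  //   HdfsProtos.SnapshotDiffReportProto report = reportBuilder.buildPartial();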

  public interface SnapshotDiffReportListingEntryProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotDiffReportListingEntryProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required bytes fullpath = 1;</code>
     * @return Whether the fullpath field is set.
     */
    boolean hasFullpath();
    /**
     * <code>required bytes fullpath = 1;</code>
     * @return The fullpath.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getFullpath();

    /**
     * <code>required uint64 dirId = 2;</code>
     * @return Whether the dirId field is set.
     */
    boolean hasDirId();
    /**
     * <code>required uint64 dirId = 2;</code>
     * @return The dirId.
     */
    long getDirId();

    /**
     * <code>required bool isReference = 3;</code>
     * @return Whether the isReference field is set.
     */
    boolean hasIsReference();
    /**
     * <code>required bool isReference = 3;</code>
     * @return The isReference.
     */
    boolean getIsReference();

    /**
     * <code>optional bytes targetPath = 4;</code>
     * @return Whether the targetPath field is set.
     */
    boolean hasTargetPath();
    /**
     * <code>optional bytes targetPath = 4;</code>
     * @return The targetPath.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getTargetPath();

    /**
     * <code>optional uint64 fileId = 5;</code>
     * @return Whether the fileId field is set.
     */
    boolean hasFileId();
    /**
     * <code>optional uint64 fileId = 5;</code>
     * @return The fileId.
     */
    long getFileId();
  }
  /**
   * <pre>
   **
   * Snapshot diff report listing entry
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportListingEntryProto}
   */
  public static final class SnapshotDiffReportListingEntryProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotDiffReportListingEntryProto)
      SnapshotDiffReportListingEntryProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use SnapshotDiffReportListingEntryProto.newBuilder() to construct.
    private SnapshotDiffReportListingEntryProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private SnapshotDiffReportListingEntryProto() {
      fullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new SnapshotDiffReportListingEntryProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder.class);
    }

    private int bitField0_;
    public static final int FULLPATH_FIELD_NUMBER = 1;
    private org.apache.hadoop.thirdparty.protobuf.ByteString fullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>required bytes fullpath = 1;</code>
     * @return Whether the fullpath field is set.
     */
    @java.lang.Override
    public boolean hasFullpath() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required bytes fullpath = 1;</code>
     * @return The fullpath.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getFullpath() {
      return fullpath_;
    }

    public static final int DIRID_FIELD_NUMBER = 2;
    private long dirId_ = 0L;
    /**
     * <code>required uint64 dirId = 2;</code>
     * @return Whether the dirId field is set.
     */
    @java.lang.Override
    public boolean hasDirId() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required uint64 dirId = 2;</code>
     * @return The dirId.
     */
    @java.lang.Override
    public long getDirId() {
      return dirId_;
    }

    public static final int ISREFERENCE_FIELD_NUMBER = 3;
    private boolean isReference_ = false;
    /**
     * <code>required bool isReference = 3;</code>
     * @return Whether the isReference field is set.
     */
    @java.lang.Override
    public boolean hasIsReference() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required bool isReference = 3;</code>
     * @return The isReference.
     */
    @java.lang.Override
    public boolean getIsReference() {
      return isReference_;
    }

    public static final int TARGETPATH_FIELD_NUMBER = 4;
    private org.apache.hadoop.thirdparty.protobuf.ByteString targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>optional bytes targetPath = 4;</code>
     * @return Whether the targetPath field is set.
     */
    @java.lang.Override
    public boolean hasTargetPath() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>optional bytes targetPath = 4;</code>
     * @return The targetPath.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getTargetPath() {
      return targetPath_;
    }

    public static final int FILEID_FIELD_NUMBER = 5;
    private long fileId_ = 0L;
    /**
     * <code>optional uint64 fileId = 5;</code>
     * @return Whether the fileId field is set.
     */
    @java.lang.Override
    public boolean hasFileId() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>optional uint64 fileId = 5;</code>
     * @return The fileId.
     */
    @java.lang.Override
    public long getFileId() {
      return fileId_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasFullpath()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasDirId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasIsReference()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeBytes(1, fullpath_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(2, dirId_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeBool(3, isReference_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeBytes(4, targetPath_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeUInt64(5, fileId_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(1, fullpath_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(2, dirId_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(3, isReference_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(4, targetPath_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(5, fileId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto) obj;

      if (hasFullpath() != other.hasFullpath()) return false;
      if (hasFullpath()) {
        if (!getFullpath()
            .equals(other.getFullpath())) return false;
      }
      if (hasDirId() != other.hasDirId()) return false;
      if (hasDirId()) {
        if (getDirId()
            != other.getDirId()) return false;
      }
      if (hasIsReference() != other.hasIsReference()) return false;
      if (hasIsReference()) {
        if (getIsReference()
            != other.getIsReference()) return false;
      }
      if (hasTargetPath() != other.hasTargetPath()) return false;
      if (hasTargetPath()) {
        if (!getTargetPath()
            .equals(other.getTargetPath())) return false;
      }
      if (hasFileId() != other.hasFileId()) return false;
      if (hasFileId()) {
        if (getFileId()
            != other.getFileId()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasFullpath()) {
        hash = (37 * hash) + FULLPATH_FIELD_NUMBER;
        hash = (53 * hash) + getFullpath().hashCode();
      }
      if (hasDirId()) {
        hash = (37 * hash) + DIRID_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getDirId());
      }
      if (hasIsReference()) {
        hash = (37 * hash) + ISREFERENCE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getIsReference());
      }
      if (hasTargetPath()) {
        hash = (37 * hash) + TARGETPATH_FIELD_NUMBER;
        hash = (53 * hash) + getTargetPath().hashCode();
      }
      if (hasFileId()) {
        hash = (37 * hash) + FILEID_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getFileId());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Snapshot diff report listing entry
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportListingEntryProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotDiffReportListingEntryProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.newBuilder()
      private Builder() {
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        fullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        dirId_ = 0L;
        isReference_ = false;
        targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        fileId_ = 0L;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.fullpath_ = fullpath_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.dirId_ = dirId_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.isReference_ = isReference_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.targetPath_ = targetPath_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.fileId_ = fileId_;
          to_bitField0_ |= 0x00000010;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance()) return this;
        if (other.hasFullpath()) {
          setFullpath(other.getFullpath());
        }
        if (other.hasDirId()) {
          setDirId(other.getDirId());
        }
        if (other.hasIsReference()) {
          setIsReference(other.getIsReference());
        }
        if (other.hasTargetPath()) {
          setTargetPath(other.getTargetPath());
        }
        if (other.hasFileId()) {
          setFileId(other.getFileId());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasFullpath()) {
          return false;
        }
        if (!hasDirId()) {
          return false;
        }
        if (!hasIsReference()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                fullpath_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                dirId_ = input.readUInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                isReference_ = input.readBool();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 34: {
                targetPath_ = input.readBytes();
                bitField0_ |= 0x00000008;
                break;
              } // case 34
              case 40: {
                fileId_ = input.readUInt64();
                bitField0_ |= 0x00000010;
                break;
              } // case 40
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.thirdparty.protobuf.ByteString fullpath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>required bytes fullpath = 1;</code>
       * @return Whether the fullpath field is set.
       */
      @java.lang.Override
      public boolean hasFullpath() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required bytes fullpath = 1;</code>
       * @return The fullpath.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getFullpath() {
        return fullpath_;
      }
      /**
       * <code>required bytes fullpath = 1;</code>
       * @param value The fullpath to set.
       * @return This builder for chaining.
       */
      public Builder setFullpath(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        fullpath_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required bytes fullpath = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearFullpath() {
        bitField0_ = (bitField0_ & ~0x00000001);
        fullpath_ = getDefaultInstance().getFullpath();
        onChanged();
        return this;
      }

      private long dirId_;
      /**
       * <code>required uint64 dirId = 2;</code>
       * @return Whether the dirId field is set.
       */
      @java.lang.Override
      public boolean hasDirId() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required uint64 dirId = 2;</code>
       * @return The dirId.
       */
      @java.lang.Override
      public long getDirId() {
        return dirId_;
      }
      /**
       * <code>required uint64 dirId = 2;</code>
       * @param value The dirId to set.
       * @return This builder for chaining.
       */
      public Builder setDirId(long value) {
        dirId_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 dirId = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearDirId() {
        bitField0_ = (bitField0_ & ~0x00000002);
        dirId_ = 0L;
        onChanged();
        return this;
      }

      private boolean isReference_;
      /**
       * <code>required bool isReference = 3;</code>
       * @return Whether the isReference field is set.
       */
      @java.lang.Override
      public boolean hasIsReference() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required bool isReference = 3;</code>
       * @return The isReference.
       */
      @java.lang.Override
      public boolean getIsReference() {
        return isReference_;
      }
      /**
       * <code>required bool isReference = 3;</code>
       * @param value The isReference to set.
       * @return This builder for chaining.
       */
      public Builder setIsReference(boolean value) {
        isReference_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required bool isReference = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearIsReference() {
        bitField0_ = (bitField0_ & ~0x00000004);
        isReference_ = false;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString targetPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>optional bytes targetPath = 4;</code>
       * @return Whether the targetPath field is set.
       */
      @java.lang.Override
      public boolean hasTargetPath() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional bytes targetPath = 4;</code>
       * @return The targetPath.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getTargetPath() {
        return targetPath_;
      }
      /**
       * <code>optional bytes targetPath = 4;</code>
       * @param value The targetPath to set.
       * @return This builder for chaining.
       */
      public Builder setTargetPath(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        targetPath_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional bytes targetPath = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearTargetPath() {
        bitField0_ = (bitField0_ & ~0x00000008);
        targetPath_ = getDefaultInstance().getTargetPath();
        onChanged();
        return this;
      }

      private long fileId_;
      /**
       * <code>optional uint64 fileId = 5;</code>
       * @return Whether the fileId field is set.
       */
      @java.lang.Override
      public boolean hasFileId() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional uint64 fileId = 5;</code>
       * @return The fileId.
       */
      @java.lang.Override
      public long getFileId() {
        return fileId_;
      }
      /**
       * <code>optional uint64 fileId = 5;</code>
       * @param value The fileId to set.
       * @return This builder for chaining.
       */
      public Builder setFileId(long value) {
        fileId_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 fileId = 5;</code>
       * @return This builder for chaining.
       */
      public Builder clearFileId() {
        bitField0_ = (bitField0_ & ~0x00000010);
        fileId_ = 0L;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotDiffReportListingEntryProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotDiffReportListingEntryProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotDiffReportListingEntryProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<SnapshotDiffReportListingEntryProto>() {
      @java.lang.Override
      public SnapshotDiffReportListingEntryProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotDiffReportListingEntryProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotDiffReportListingEntryProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
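
  // Illustrative round-trip sketch (hand-written, not produced by protoc). The path and id
  // values are hypothetical; every method used is declared in this class or its Builder, plus
  // the standard toByteArray()/ByteString.copyFromUtf8 helpers from the shaded protobuf runtime.
  // The three required fields (fullpath, dirId, isReference) must be set or build() will throw.
  //
  //   HdfsProtos.SnapshotDiffReportListingEntryProto entry =
  //       HdfsProtos.SnapshotDiffReportListingEntryProto.newBuilder()
  //           .setFullpath(org.apache.hadoop.thirdparty.protobuf.ByteString
  //               .copyFromUtf8("/dir/renamed-file"))
  //           .setDirId(16385L)
  //           .setIsReference(false)
  //           .build();
  //   byte[] wire = entry.toByteArray();
  //   HdfsProtos.SnapshotDiffReportListingEntryProto parsed =
  //       HdfsProtos.SnapshotDiffReportListingEntryProto.parseFrom(wire);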

  public interface SnapshotDiffReportCursorProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotDiffReportCursorProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required bytes startPath = 1;</code>
     * @return Whether the startPath field is set.
     */
    boolean hasStartPath();
    /**
     * <code>required bytes startPath = 1;</code>
     * @return The startPath.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getStartPath();

    /**
     * <code>required int32 index = 2 [default = -1];</code>
     * @return Whether the index field is set.
     */
    boolean hasIndex();
    /**
     * <code>required int32 index = 2 [default = -1];</code>
     * @return The index.
     */
    int getIndex();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportCursorProto}
   */
  public static final class SnapshotDiffReportCursorProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotDiffReportCursorProto)
      SnapshotDiffReportCursorProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use SnapshotDiffReportCursorProto.newBuilder() to construct.
    private SnapshotDiffReportCursorProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private SnapshotDiffReportCursorProto() {
      startPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      index_ = -1;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new SnapshotDiffReportCursorProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder.class);
    }

    private int bitField0_;
    public static final int STARTPATH_FIELD_NUMBER = 1;
    private org.apache.hadoop.thirdparty.protobuf.ByteString startPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>required bytes startPath = 1;</code>
     * @return Whether the startPath field is set.
     */
    @java.lang.Override
    public boolean hasStartPath() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required bytes startPath = 1;</code>
     * @return The startPath.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getStartPath() {
      return startPath_;
    }

    public static final int INDEX_FIELD_NUMBER = 2;
    private int index_ = -1;
    /**
     * <code>required int32 index = 2 [default = -1];</code>
     * @return Whether the index field is set.
     */
    @java.lang.Override
    public boolean hasIndex() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required int32 index = 2 [default = -1];</code>
     * @return The index.
     */
    @java.lang.Override
    public int getIndex() {
      return index_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasStartPath()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasIndex()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeBytes(1, startPath_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeInt32(2, index_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(1, startPath_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt32Size(2, index_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto) obj;

      if (hasStartPath() != other.hasStartPath()) return false;
      if (hasStartPath()) {
        if (!getStartPath()
            .equals(other.getStartPath())) return false;
      }
      if (hasIndex() != other.hasIndex()) return false;
      if (hasIndex()) {
        if (getIndex()
            != other.getIndex()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasStartPath()) {
        hash = (37 * hash) + STARTPATH_FIELD_NUMBER;
        hash = (53 * hash) + getStartPath().hashCode();
      }
      if (hasIndex()) {
        hash = (37 * hash) + INDEX_FIELD_NUMBER;
        hash = (53 * hash) + getIndex();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportCursorProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotDiffReportCursorProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.newBuilder()
      private Builder() {
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        startPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        index_ = -1;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.startPath_ = startPath_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.index_ = index_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance()) return this;
        if (other.hasStartPath()) {
          setStartPath(other.getStartPath());
        }
        if (other.hasIndex()) {
          setIndex(other.getIndex());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasStartPath()) {
          return false;
        }
        if (!hasIndex()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                startPath_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                index_ = input.readInt32();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.thirdparty.protobuf.ByteString startPath_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>required bytes startPath = 1;</code>
       * @return Whether the startPath field is set.
       */
      @java.lang.Override
      public boolean hasStartPath() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required bytes startPath = 1;</code>
       * @return The startPath.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getStartPath() {
        return startPath_;
      }
      /**
       * <code>required bytes startPath = 1;</code>
       * @param value The startPath to set.
       * @return This builder for chaining.
       */
      public Builder setStartPath(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        startPath_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required bytes startPath = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearStartPath() {
        bitField0_ = (bitField0_ & ~0x00000001);
        startPath_ = getDefaultInstance().getStartPath();
        onChanged();
        return this;
      }

      private int index_ = -1;
      /**
       * <code>required int32 index = 2 [default = -1];</code>
       * @return Whether the index field is set.
       */
      @java.lang.Override
      public boolean hasIndex() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required int32 index = 2 [default = -1];</code>
       * @return The index.
       */
      @java.lang.Override
      public int getIndex() {
        return index_;
      }
      /**
       * <code>required int32 index = 2 [default = -1];</code>
       * @param value The index to set.
       * @return This builder for chaining.
       */
      public Builder setIndex(int value) {
        index_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required int32 index = 2 [default = -1];</code>
       * @return This builder for chaining.
       */
      public Builder clearIndex() {
        bitField0_ = (bitField0_ & ~0x00000002);
        index_ = -1;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotDiffReportCursorProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotDiffReportCursorProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotDiffReportCursorProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<SnapshotDiffReportCursorProto>() {
      @java.lang.Override
      public SnapshotDiffReportCursorProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotDiffReportCursorProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotDiffReportCursorProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
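  /*
   * Illustrative usage sketch (hand-written, not part of the generated output):
   * one way a caller might build and round-trip a SnapshotDiffReportCursorProto.
   * Both fields are required by the schema; the path value below is a made-up
   * example.
   *
   *   org.apache.hadoop.thirdparty.protobuf.ByteString path =
   *       org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("dir1/file1");
   *   HdfsProtos.SnapshotDiffReportCursorProto cursor =
   *       HdfsProtos.SnapshotDiffReportCursorProto.newBuilder()
   *           .setStartPath(path)
   *           .setIndex(0)      // index defaults to -1 when cleared
   *           .build();
   *   byte[] bytes = cursor.toByteArray();
   *   HdfsProtos.SnapshotDiffReportCursorProto parsed =
   *       HdfsProtos.SnapshotDiffReportCursorProto.parseFrom(bytes);
   */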

  public interface SnapshotDiffReportListingProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotDiffReportListingProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * full path of the directory where snapshots were taken
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto> 
        getModifiedEntriesList();
    /**
     * <pre>
     * full path of the directory where snapshots were taken
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getModifiedEntries(int index);
    /**
     * <pre>
     * full path of the directory where snapshots were taken
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
     */
    int getModifiedEntriesCount();
    /**
     * <pre>
     * full path of the directory where snapshots were taken
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> 
        getModifiedEntriesOrBuilderList();
    /**
     * <pre>
     * full path of the directory where snapshots were taken
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getModifiedEntriesOrBuilder(
        int index);

    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto> 
        getCreatedEntriesList();
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getCreatedEntries(int index);
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
     */
    int getCreatedEntriesCount();
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> 
        getCreatedEntriesOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getCreatedEntriesOrBuilder(
        int index);

    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto> 
        getDeletedEntriesList();
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getDeletedEntries(int index);
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
     */
    int getDeletedEntriesCount();
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> 
        getDeletedEntriesOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getDeletedEntriesOrBuilder(
        int index);

    /**
     * <code>required bool isFromEarlier = 4;</code>
     * @return Whether the isFromEarlier field is set.
     */
    boolean hasIsFromEarlier();
    /**
     * <code>required bool isFromEarlier = 4;</code>
     * @return The isFromEarlier.
     */
    boolean getIsFromEarlier();

    /**
     * <code>optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5;</code>
     * @return Whether the cursor field is set.
     */
    boolean hasCursor();
    /**
     * <code>optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5;</code>
     * @return The cursor.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto getCursor();
    /**
     * <code>optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder getCursorOrBuilder();
  }
  /**
   * <pre>
   **
   * Snapshot diff report listing
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportListingProto}
   */
  public static final class SnapshotDiffReportListingProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotDiffReportListingProto)
      SnapshotDiffReportListingProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use SnapshotDiffReportListingProto.newBuilder() to construct.
    private SnapshotDiffReportListingProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private SnapshotDiffReportListingProto() {
      modifiedEntries_ = java.util.Collections.emptyList();
      createdEntries_ = java.util.Collections.emptyList();
      deletedEntries_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new SnapshotDiffReportListingProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.Builder.class);
    }

    private int bitField0_;
    public static final int MODIFIEDENTRIES_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto> modifiedEntries_;
    /**
     * <pre>
     * full path of the directory where snapshots were taken
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto> getModifiedEntriesList() {
      return modifiedEntries_;
    }
    /**
     * <pre>
     * full path of the directory where snapshots were taken
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> 
        getModifiedEntriesOrBuilderList() {
      return modifiedEntries_;
    }
    /**
     * <pre>
     * full path of the directory where snapshots were taken
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
     */
    @java.lang.Override
    public int getModifiedEntriesCount() {
      return modifiedEntries_.size();
    }
    /**
     * <pre>
     * full path of the directory where snapshots were taken
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getModifiedEntries(int index) {
      return modifiedEntries_.get(index);
    }
    /**
     * <pre>
     * full path of the directory where snapshots were taken
     * </pre>
     *
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getModifiedEntriesOrBuilder(
        int index) {
      return modifiedEntries_.get(index);
    }

    public static final int CREATEDENTRIES_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto> createdEntries_;
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto> getCreatedEntriesList() {
      return createdEntries_;
    }
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> 
        getCreatedEntriesOrBuilderList() {
      return createdEntries_;
    }
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
     */
    @java.lang.Override
    public int getCreatedEntriesCount() {
      return createdEntries_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getCreatedEntries(int index) {
      return createdEntries_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getCreatedEntriesOrBuilder(
        int index) {
      return createdEntries_.get(index);
    }

    public static final int DELETEDENTRIES_FIELD_NUMBER = 3;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto> deletedEntries_;
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto> getDeletedEntriesList() {
      return deletedEntries_;
    }
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> 
        getDeletedEntriesOrBuilderList() {
      return deletedEntries_;
    }
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
     */
    @java.lang.Override
    public int getDeletedEntriesCount() {
      return deletedEntries_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getDeletedEntries(int index) {
      return deletedEntries_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getDeletedEntriesOrBuilder(
        int index) {
      return deletedEntries_.get(index);
    }

    public static final int ISFROMEARLIER_FIELD_NUMBER = 4;
    private boolean isFromEarlier_ = false;
    /**
     * <code>required bool isFromEarlier = 4;</code>
     * @return Whether the isFromEarlier field is set.
     */
    @java.lang.Override
    public boolean hasIsFromEarlier() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required bool isFromEarlier = 4;</code>
     * @return The isFromEarlier.
     */
    @java.lang.Override
    public boolean getIsFromEarlier() {
      return isFromEarlier_;
    }

    public static final int CURSOR_FIELD_NUMBER = 5;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto cursor_;
    /**
     * <code>optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5;</code>
     * @return Whether the cursor field is set.
     */
    @java.lang.Override
    public boolean hasCursor() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5;</code>
     * @return The cursor.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto getCursor() {
      return cursor_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance() : cursor_;
    }
    /**
     * <code>optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder getCursorOrBuilder() {
      return cursor_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance() : cursor_;
    }
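    /*
     * Illustrative usage sketch (hand-written, not part of the generated output):
     * hasCursor() distinguishes a listing that is complete from one that has more
     * pages. A consumer of a hypothetical "report" instance might continue like this:
     *
     *   if (report.hasCursor()) {
     *     HdfsProtos.SnapshotDiffReportCursorProto next = report.getCursor();
     *     // resume the diff from next.getStartPath() and next.getIndex()
     *   }
     */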

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasIsFromEarlier()) {
        memoizedIsInitialized = 0;
        return false;
      }
      for (int i = 0; i < getModifiedEntriesCount(); i++) {
        if (!getModifiedEntries(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      for (int i = 0; i < getCreatedEntriesCount(); i++) {
        if (!getCreatedEntries(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      for (int i = 0; i < getDeletedEntriesCount(); i++) {
        if (!getDeletedEntries(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasCursor()) {
        if (!getCursor().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      for (int i = 0; i < modifiedEntries_.size(); i++) {
        output.writeMessage(1, modifiedEntries_.get(i));
      }
      for (int i = 0; i < createdEntries_.size(); i++) {
        output.writeMessage(2, createdEntries_.get(i));
      }
      for (int i = 0; i < deletedEntries_.size(); i++) {
        output.writeMessage(3, deletedEntries_.get(i));
      }
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeBool(4, isFromEarlier_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeMessage(5, getCursor());
      }
      getUnknownFields().writeTo(output);
    }
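    /*
     * Illustrative usage sketch (hand-written, not part of the generated output):
     * callers rarely call writeTo(CodedOutputStream) directly; writeDelimitedTo,
     * inherited from the protobuf runtime, frames the same bytes with a length
     * prefix and pairs with parseDelimitedFrom further below. The stream name is
     * assumed:
     *
     *   listing.writeDelimitedTo(out);  // java.io.OutputStream out
     */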

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      for (int i = 0; i < modifiedEntries_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, modifiedEntries_.get(i));
      }
      for (int i = 0; i < createdEntries_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(2, createdEntries_.get(i));
      }
      for (int i = 0; i < deletedEntries_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(3, deletedEntries_.get(i));
      }
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(4, isFromEarlier_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(5, getCursor());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto) obj;

      if (!getModifiedEntriesList()
          .equals(other.getModifiedEntriesList())) return false;
      if (!getCreatedEntriesList()
          .equals(other.getCreatedEntriesList())) return false;
      if (!getDeletedEntriesList()
          .equals(other.getDeletedEntriesList())) return false;
      if (hasIsFromEarlier() != other.hasIsFromEarlier()) return false;
      if (hasIsFromEarlier()) {
        if (getIsFromEarlier()
            != other.getIsFromEarlier()) return false;
      }
      if (hasCursor() != other.hasCursor()) return false;
      if (hasCursor()) {
        if (!getCursor()
            .equals(other.getCursor())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getModifiedEntriesCount() > 0) {
        hash = (37 * hash) + MODIFIEDENTRIES_FIELD_NUMBER;
        hash = (53 * hash) + getModifiedEntriesList().hashCode();
      }
      if (getCreatedEntriesCount() > 0) {
        hash = (37 * hash) + CREATEDENTRIES_FIELD_NUMBER;
        hash = (53 * hash) + getCreatedEntriesList().hashCode();
      }
      if (getDeletedEntriesCount() > 0) {
        hash = (37 * hash) + DELETEDENTRIES_FIELD_NUMBER;
        hash = (53 * hash) + getDeletedEntriesList().hashCode();
      }
      if (hasIsFromEarlier()) {
        hash = (37 * hash) + ISFROMEARLIER_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getIsFromEarlier());
      }
      if (hasCursor()) {
        hash = (37 * hash) + CURSOR_FIELD_NUMBER;
        hash = (53 * hash) + getCursor().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
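    /*
     * Illustrative usage sketch (hand-written, not part of the generated output):
     * the byte[] and ByteString parseFrom overloads above throw
     * InvalidProtocolBufferException when the input is not a valid, fully
     * initialized message. Example with a payload variable that is assumed to come
     * from an RPC response:
     *
     *   byte[] payload = responseBytes;  // assumed source of serialized bytes
     *   HdfsProtos.SnapshotDiffReportListingProto listing =
     *       HdfsProtos.SnapshotDiffReportListingProto.parseFrom(payload);
     *   boolean fromEarlier = listing.getIsFromEarlier();
     */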

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }
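    /*
     * Illustrative usage sketch (hand-written, not part of the generated output):
     * toBuilder() copies an existing message into a mutable Builder, which is how a
     * caller could attach a cursor to an otherwise finished listing. The "listing"
     * and "cursor" names are assumptions:
     *
     *   HdfsProtos.SnapshotDiffReportListingProto withCursor =
     *       listing.toBuilder()
     *           .setCursor(cursor)
     *           .build();
     */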

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Snapshot diff report listing
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.SnapshotDiffReportListingProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotDiffReportListingProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getModifiedEntriesFieldBuilder();
          getCreatedEntriesFieldBuilder();
          getDeletedEntriesFieldBuilder();
          getCursorFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        if (modifiedEntriesBuilder_ == null) {
          modifiedEntries_ = java.util.Collections.emptyList();
        } else {
          modifiedEntries_ = null;
          modifiedEntriesBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        if (createdEntriesBuilder_ == null) {
          createdEntries_ = java.util.Collections.emptyList();
        } else {
          createdEntries_ = null;
          createdEntriesBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        if (deletedEntriesBuilder_ == null) {
          deletedEntries_ = java.util.Collections.emptyList();
        } else {
          deletedEntries_ = null;
          deletedEntriesBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000004);
        isFromEarlier_ = false;
        cursor_ = null;
        if (cursorBuilder_ != null) {
          cursorBuilder_.dispose();
          cursorBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto result) {
        if (modifiedEntriesBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0)) {
            modifiedEntries_ = java.util.Collections.unmodifiableList(modifiedEntries_);
            bitField0_ = (bitField0_ & ~0x00000001);
          }
          result.modifiedEntries_ = modifiedEntries_;
        } else {
          result.modifiedEntries_ = modifiedEntriesBuilder_.build();
        }
        if (createdEntriesBuilder_ == null) {
          if (((bitField0_ & 0x00000002) != 0)) {
            createdEntries_ = java.util.Collections.unmodifiableList(createdEntries_);
            bitField0_ = (bitField0_ & ~0x00000002);
          }
          result.createdEntries_ = createdEntries_;
        } else {
          result.createdEntries_ = createdEntriesBuilder_.build();
        }
        if (deletedEntriesBuilder_ == null) {
          if (((bitField0_ & 0x00000004) != 0)) {
            deletedEntries_ = java.util.Collections.unmodifiableList(deletedEntries_);
            bitField0_ = (bitField0_ & ~0x00000004);
          }
          result.deletedEntries_ = deletedEntries_;
        } else {
          result.deletedEntries_ = deletedEntriesBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.isFromEarlier_ = isFromEarlier_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.cursor_ = cursorBuilder_ == null
              ? cursor_
              : cursorBuilder_.build();
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto.getDefaultInstance()) return this;
        if (modifiedEntriesBuilder_ == null) {
          if (!other.modifiedEntries_.isEmpty()) {
            if (modifiedEntries_.isEmpty()) {
              modifiedEntries_ = other.modifiedEntries_;
              bitField0_ = (bitField0_ & ~0x00000001);
            } else {
              ensureModifiedEntriesIsMutable();
              modifiedEntries_.addAll(other.modifiedEntries_);
            }
            onChanged();
          }
        } else {
          if (!other.modifiedEntries_.isEmpty()) {
            if (modifiedEntriesBuilder_.isEmpty()) {
              modifiedEntriesBuilder_.dispose();
              modifiedEntriesBuilder_ = null;
              modifiedEntries_ = other.modifiedEntries_;
              bitField0_ = (bitField0_ & ~0x00000001);
              modifiedEntriesBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getModifiedEntriesFieldBuilder() : null;
            } else {
              modifiedEntriesBuilder_.addAllMessages(other.modifiedEntries_);
            }
          }
        }
        if (createdEntriesBuilder_ == null) {
          if (!other.createdEntries_.isEmpty()) {
            if (createdEntries_.isEmpty()) {
              createdEntries_ = other.createdEntries_;
              bitField0_ = (bitField0_ & ~0x00000002);
            } else {
              ensureCreatedEntriesIsMutable();
              createdEntries_.addAll(other.createdEntries_);
            }
            onChanged();
          }
        } else {
          if (!other.createdEntries_.isEmpty()) {
            if (createdEntriesBuilder_.isEmpty()) {
              createdEntriesBuilder_.dispose();
              createdEntriesBuilder_ = null;
              createdEntries_ = other.createdEntries_;
              bitField0_ = (bitField0_ & ~0x00000002);
              createdEntriesBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getCreatedEntriesFieldBuilder() : null;
            } else {
              createdEntriesBuilder_.addAllMessages(other.createdEntries_);
            }
          }
        }
        if (deletedEntriesBuilder_ == null) {
          if (!other.deletedEntries_.isEmpty()) {
            if (deletedEntries_.isEmpty()) {
              deletedEntries_ = other.deletedEntries_;
              bitField0_ = (bitField0_ & ~0x00000004);
            } else {
              ensureDeletedEntriesIsMutable();
              deletedEntries_.addAll(other.deletedEntries_);
            }
            onChanged();
          }
        } else {
          if (!other.deletedEntries_.isEmpty()) {
            if (deletedEntriesBuilder_.isEmpty()) {
              deletedEntriesBuilder_.dispose();
              deletedEntriesBuilder_ = null;
              deletedEntries_ = other.deletedEntries_;
              bitField0_ = (bitField0_ & ~0x00000004);
              deletedEntriesBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getDeletedEntriesFieldBuilder() : null;
            } else {
              deletedEntriesBuilder_.addAllMessages(other.deletedEntries_);
            }
          }
        }
        if (other.hasIsFromEarlier()) {
          setIsFromEarlier(other.getIsFromEarlier());
        }
        if (other.hasCursor()) {
          mergeCursor(other.getCursor());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasIsFromEarlier()) {
          return false;
        }
        for (int i = 0; i < getModifiedEntriesCount(); i++) {
          if (!getModifiedEntries(i).isInitialized()) {
            return false;
          }
        }
        for (int i = 0; i < getCreatedEntriesCount(); i++) {
          if (!getCreatedEntries(i).isInitialized()) {
            return false;
          }
        }
        for (int i = 0; i < getDeletedEntriesCount(); i++) {
          if (!getDeletedEntries(i).isInitialized()) {
            return false;
          }
        }
        if (hasCursor()) {
          if (!getCursor().isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.PARSER,
                        extensionRegistry);
                if (modifiedEntriesBuilder_ == null) {
                  ensureModifiedEntriesIsMutable();
                  modifiedEntries_.add(m);
                } else {
                  modifiedEntriesBuilder_.addMessage(m);
                }
                break;
              } // case 10
              case 18: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.PARSER,
                        extensionRegistry);
                if (createdEntriesBuilder_ == null) {
                  ensureCreatedEntriesIsMutable();
                  createdEntries_.add(m);
                } else {
                  createdEntriesBuilder_.addMessage(m);
                }
                break;
              } // case 18
              case 26: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.PARSER,
                        extensionRegistry);
                if (deletedEntriesBuilder_ == null) {
                  ensureDeletedEntriesIsMutable();
                  deletedEntries_.add(m);
                } else {
                  deletedEntriesBuilder_.addMessage(m);
                }
                break;
              } // case 26
              case 32: {
                isFromEarlier_ = input.readBool();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
              case 42: {
                input.readMessage(
                    getCursorFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000010;
                break;
              } // case 42
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto> modifiedEntries_ =
        java.util.Collections.emptyList();
      private void ensureModifiedEntriesIsMutable() {
        if (!((bitField0_ & 0x00000001) != 0)) {
          modifiedEntries_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto>(modifiedEntries_);
          bitField0_ |= 0x00000001;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> modifiedEntriesBuilder_;

      /**
       * <pre>
       * full path of the directory where snapshots were taken
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto> getModifiedEntriesList() {
        if (modifiedEntriesBuilder_ == null) {
          return java.util.Collections.unmodifiableList(modifiedEntries_);
        } else {
          return modifiedEntriesBuilder_.getMessageList();
        }
      }
      /**
       * <pre>
       * full path of the directory where snapshots were taken
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
       */
      public int getModifiedEntriesCount() {
        if (modifiedEntriesBuilder_ == null) {
          return modifiedEntries_.size();
        } else {
          return modifiedEntriesBuilder_.getCount();
        }
      }
      /**
       * <pre>
       * full path of the directory where snapshots were taken
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getModifiedEntries(int index) {
        if (modifiedEntriesBuilder_ == null) {
          return modifiedEntries_.get(index);
        } else {
          return modifiedEntriesBuilder_.getMessage(index);
        }
      }
      /**
       * <pre>
       * full path of the directory where snapshots were taken
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
       */
      public Builder setModifiedEntries(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) {
        if (modifiedEntriesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureModifiedEntriesIsMutable();
          modifiedEntries_.set(index, value);
          onChanged();
        } else {
          modifiedEntriesBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <pre>
       * full path of the directory where snapshots were taken
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
       */
      public Builder setModifiedEntries(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) {
        if (modifiedEntriesBuilder_ == null) {
          ensureModifiedEntriesIsMutable();
          modifiedEntries_.set(index, builderForValue.build());
          onChanged();
        } else {
          modifiedEntriesBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <pre>
       * full path of the directory where snapshots were taken
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
       */
      public Builder addModifiedEntries(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) {
        if (modifiedEntriesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureModifiedEntriesIsMutable();
          modifiedEntries_.add(value);
          onChanged();
        } else {
          modifiedEntriesBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <pre>
       * full path of the directory where snapshots were taken
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
       */
      public Builder addModifiedEntries(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) {
        if (modifiedEntriesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureModifiedEntriesIsMutable();
          modifiedEntries_.add(index, value);
          onChanged();
        } else {
          modifiedEntriesBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <pre>
       * full path of the directory where snapshots were taken
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
       */
      public Builder addModifiedEntries(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) {
        if (modifiedEntriesBuilder_ == null) {
          ensureModifiedEntriesIsMutable();
          modifiedEntries_.add(builderForValue.build());
          onChanged();
        } else {
          modifiedEntriesBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <pre>
       * full path of the directory where snapshots were taken
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
       */
      public Builder addModifiedEntries(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) {
        if (modifiedEntriesBuilder_ == null) {
          ensureModifiedEntriesIsMutable();
          modifiedEntries_.add(index, builderForValue.build());
          onChanged();
        } else {
          modifiedEntriesBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <pre>
       * entries for files and directories modified between the two snapshots
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
       */
      public Builder addAllModifiedEntries(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto> values) {
        if (modifiedEntriesBuilder_ == null) {
          ensureModifiedEntriesIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, modifiedEntries_);
          onChanged();
        } else {
          modifiedEntriesBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <pre>
       * entries for files and directories modified between the two snapshots
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
       */
      public Builder clearModifiedEntries() {
        if (modifiedEntriesBuilder_ == null) {
          modifiedEntries_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
        } else {
          modifiedEntriesBuilder_.clear();
        }
        return this;
      }
      /**
       * <pre>
       * entries for files and directories modified between the two snapshots
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
       */
      public Builder removeModifiedEntries(int index) {
        if (modifiedEntriesBuilder_ == null) {
          ensureModifiedEntriesIsMutable();
          modifiedEntries_.remove(index);
          onChanged();
        } else {
          modifiedEntriesBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <pre>
       * entries for files and directories modified between the two snapshots
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder getModifiedEntriesBuilder(
          int index) {
        return getModifiedEntriesFieldBuilder().getBuilder(index);
      }
      /**
       * <pre>
       * entries for files and directories modified between the two snapshots
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getModifiedEntriesOrBuilder(
          int index) {
        if (modifiedEntriesBuilder_ == null) {
          return modifiedEntries_.get(index);
        } else {
          return modifiedEntriesBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <pre>
       * entries for files and directories modified between the two snapshots
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> 
           getModifiedEntriesOrBuilderList() {
        if (modifiedEntriesBuilder_ != null) {
          return modifiedEntriesBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(modifiedEntries_);
        }
      }
      /**
       * <pre>
       * entries for files and directories modified between the two snapshots
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder addModifiedEntriesBuilder() {
        return getModifiedEntriesFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance());
      }
      /**
       * <pre>
       * entries for files and directories modified between the two snapshots
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder addModifiedEntriesBuilder(
          int index) {
        return getModifiedEntriesFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance());
      }
      /**
       * <pre>
       * entries for files and directories modified between the two snapshots
       * </pre>
       *
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto modifiedEntries = 1;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder> 
           getModifiedEntriesBuilderList() {
        return getModifiedEntriesFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> 
          getModifiedEntriesFieldBuilder() {
        if (modifiedEntriesBuilder_ == null) {
          modifiedEntriesBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder>(
                  modifiedEntries_,
                  ((bitField0_ & 0x00000001) != 0),
                  getParentForChildren(),
                  isClean());
          modifiedEntries_ = null;
        }
        return modifiedEntriesBuilder_;
      }
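      /*
       * Minimal usage sketch for the repeated modifiedEntries field; local
       * variable names are illustrative. Calling any of the *Builder accessors
       * above hands the plain ArrayList off to a lazily created
       * RepeatedFieldBuilderV3 and nulls modifiedEntries_, so all later reads
       * and writes go through modifiedEntriesBuilder_.
       *
       *   SnapshotDiffReportListingProto.Builder report =
       *       SnapshotDiffReportListingProto.newBuilder();
       *   SnapshotDiffReportListingEntryProto.Builder entry =
       *       report.addModifiedEntriesBuilder();        // switches to the field-builder path
       *   // ... populate 'entry' through its generated setters ...
       *   int count = report.getModifiedEntriesCount();  // 1
       */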

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto> createdEntries_ =
        java.util.Collections.emptyList();
      private void ensureCreatedEntriesIsMutable() {
        if (!((bitField0_ & 0x00000002) != 0)) {
          createdEntries_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto>(createdEntries_);
          bitField0_ |= 0x00000002;
         }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> createdEntriesBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto> getCreatedEntriesList() {
        if (createdEntriesBuilder_ == null) {
          return java.util.Collections.unmodifiableList(createdEntries_);
        } else {
          return createdEntriesBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
       */
      public int getCreatedEntriesCount() {
        if (createdEntriesBuilder_ == null) {
          return createdEntries_.size();
        } else {
          return createdEntriesBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getCreatedEntries(int index) {
        if (createdEntriesBuilder_ == null) {
          return createdEntries_.get(index);
        } else {
          return createdEntriesBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
       */
      public Builder setCreatedEntries(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) {
        if (createdEntriesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureCreatedEntriesIsMutable();
          createdEntries_.set(index, value);
          onChanged();
        } else {
          createdEntriesBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
       */
      public Builder setCreatedEntries(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) {
        if (createdEntriesBuilder_ == null) {
          ensureCreatedEntriesIsMutable();
          createdEntries_.set(index, builderForValue.build());
          onChanged();
        } else {
          createdEntriesBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
       */
      public Builder addCreatedEntries(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) {
        if (createdEntriesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureCreatedEntriesIsMutable();
          createdEntries_.add(value);
          onChanged();
        } else {
          createdEntriesBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
       */
      public Builder addCreatedEntries(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) {
        if (createdEntriesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureCreatedEntriesIsMutable();
          createdEntries_.add(index, value);
          onChanged();
        } else {
          createdEntriesBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
       */
      public Builder addCreatedEntries(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) {
        if (createdEntriesBuilder_ == null) {
          ensureCreatedEntriesIsMutable();
          createdEntries_.add(builderForValue.build());
          onChanged();
        } else {
          createdEntriesBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
       */
      public Builder addCreatedEntries(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) {
        if (createdEntriesBuilder_ == null) {
          ensureCreatedEntriesIsMutable();
          createdEntries_.add(index, builderForValue.build());
          onChanged();
        } else {
          createdEntriesBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
       */
      public Builder addAllCreatedEntries(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto> values) {
        if (createdEntriesBuilder_ == null) {
          ensureCreatedEntriesIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, createdEntries_);
          onChanged();
        } else {
          createdEntriesBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
       */
      public Builder clearCreatedEntries() {
        if (createdEntriesBuilder_ == null) {
          createdEntries_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000002);
          onChanged();
        } else {
          createdEntriesBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
       */
      public Builder removeCreatedEntries(int index) {
        if (createdEntriesBuilder_ == null) {
          ensureCreatedEntriesIsMutable();
          createdEntries_.remove(index);
          onChanged();
        } else {
          createdEntriesBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder getCreatedEntriesBuilder(
          int index) {
        return getCreatedEntriesFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getCreatedEntriesOrBuilder(
          int index) {
        if (createdEntriesBuilder_ == null) {
          return createdEntries_.get(index);
        } else {
          return createdEntriesBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> 
           getCreatedEntriesOrBuilderList() {
        if (createdEntriesBuilder_ != null) {
          return createdEntriesBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(createdEntries_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder addCreatedEntriesBuilder() {
        return getCreatedEntriesFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder addCreatedEntriesBuilder(
          int index) {
        return getCreatedEntriesFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto createdEntries = 2;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder> 
           getCreatedEntriesBuilderList() {
        return getCreatedEntriesFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> 
          getCreatedEntriesFieldBuilder() {
        if (createdEntriesBuilder_ == null) {
          createdEntriesBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder>(
                  createdEntries_,
                  ((bitField0_ & 0x00000002) != 0),
                  getParentForChildren(),
                  isClean());
          createdEntries_ = null;
        }
        return createdEntriesBuilder_;
      }
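      /*
       * Minimal sketch: bulk-populating createdEntries. addAllCreatedEntries
       * copies the supplied messages either into the backing ArrayList (when
       * the field builder has not been materialized) or into
       * createdEntriesBuilder_. The 'created' list is assumed to be built
       * elsewhere.
       *
       *   java.util.List<SnapshotDiffReportListingEntryProto> created = ...;
       *   SnapshotDiffReportListingProto.Builder report =
       *       SnapshotDiffReportListingProto.newBuilder()
       *           .addAllCreatedEntries(created);
       */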

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto> deletedEntries_ =
        java.util.Collections.emptyList();
      private void ensureDeletedEntriesIsMutable() {
        if (!((bitField0_ & 0x00000004) != 0)) {
          deletedEntries_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto>(deletedEntries_);
          bitField0_ |= 0x00000004;
         }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> deletedEntriesBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto> getDeletedEntriesList() {
        if (deletedEntriesBuilder_ == null) {
          return java.util.Collections.unmodifiableList(deletedEntries_);
        } else {
          return deletedEntriesBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
       */
      public int getDeletedEntriesCount() {
        if (deletedEntriesBuilder_ == null) {
          return deletedEntries_.size();
        } else {
          return deletedEntriesBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto getDeletedEntries(int index) {
        if (deletedEntriesBuilder_ == null) {
          return deletedEntries_.get(index);
        } else {
          return deletedEntriesBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
       */
      public Builder setDeletedEntries(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) {
        if (deletedEntriesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureDeletedEntriesIsMutable();
          deletedEntries_.set(index, value);
          onChanged();
        } else {
          deletedEntriesBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
       */
      public Builder setDeletedEntries(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) {
        if (deletedEntriesBuilder_ == null) {
          ensureDeletedEntriesIsMutable();
          deletedEntries_.set(index, builderForValue.build());
          onChanged();
        } else {
          deletedEntriesBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
       */
      public Builder addDeletedEntries(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) {
        if (deletedEntriesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureDeletedEntriesIsMutable();
          deletedEntries_.add(value);
          onChanged();
        } else {
          deletedEntriesBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
       */
      public Builder addDeletedEntries(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto value) {
        if (deletedEntriesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureDeletedEntriesIsMutable();
          deletedEntries_.add(index, value);
          onChanged();
        } else {
          deletedEntriesBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
       */
      public Builder addDeletedEntries(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) {
        if (deletedEntriesBuilder_ == null) {
          ensureDeletedEntriesIsMutable();
          deletedEntries_.add(builderForValue.build());
          onChanged();
        } else {
          deletedEntriesBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
       */
      public Builder addDeletedEntries(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder builderForValue) {
        if (deletedEntriesBuilder_ == null) {
          ensureDeletedEntriesIsMutable();
          deletedEntries_.add(index, builderForValue.build());
          onChanged();
        } else {
          deletedEntriesBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
       */
      public Builder addAllDeletedEntries(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto> values) {
        if (deletedEntriesBuilder_ == null) {
          ensureDeletedEntriesIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, deletedEntries_);
          onChanged();
        } else {
          deletedEntriesBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
       */
      public Builder clearDeletedEntries() {
        if (deletedEntriesBuilder_ == null) {
          deletedEntries_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000004);
          onChanged();
        } else {
          deletedEntriesBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
       */
      public Builder removeDeletedEntries(int index) {
        if (deletedEntriesBuilder_ == null) {
          ensureDeletedEntriesIsMutable();
          deletedEntries_.remove(index);
          onChanged();
        } else {
          deletedEntriesBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder getDeletedEntriesBuilder(
          int index) {
        return getDeletedEntriesFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder getDeletedEntriesOrBuilder(
          int index) {
        if (deletedEntriesBuilder_ == null) {
          return deletedEntries_.get(index);
        } else {
          return deletedEntriesBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> 
           getDeletedEntriesOrBuilderList() {
        if (deletedEntriesBuilder_ != null) {
          return deletedEntriesBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(deletedEntries_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder addDeletedEntriesBuilder() {
        return getDeletedEntriesFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder addDeletedEntriesBuilder(
          int index) {
        return getDeletedEntriesFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.SnapshotDiffReportListingEntryProto deletedEntries = 3;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder> 
           getDeletedEntriesBuilderList() {
        return getDeletedEntriesFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder> 
          getDeletedEntriesFieldBuilder() {
        if (deletedEntriesBuilder_ == null) {
          deletedEntriesBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingEntryProtoOrBuilder>(
                  deletedEntries_,
                  ((bitField0_ & 0x00000004) != 0),
                  getParentForChildren(),
                  isClean());
          deletedEntries_ = null;
        }
        return deletedEntriesBuilder_;
      }
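      /*
       * Minimal read-side sketch: getDeletedEntriesOrBuilderList() exposes each
       * element as an *OrBuilder view, so reading does not require pending child
       * builders to be turned into built messages. 'report' is illustrative.
       *
       *   SnapshotDiffReportListingProto.Builder report = ...;  // as in the sketches above
       *   for (SnapshotDiffReportListingEntryProtoOrBuilder e :
       *       report.getDeletedEntriesOrBuilderList()) {
       *     // inspect 'e' through its generated getters
       *   }
       */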

      private boolean isFromEarlier_ ;
      /**
       * <code>required bool isFromEarlier = 4;</code>
       * @return Whether the isFromEarlier field is set.
       */
      @java.lang.Override
      public boolean hasIsFromEarlier() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>required bool isFromEarlier = 4;</code>
       * @return The isFromEarlier.
       */
      @java.lang.Override
      public boolean getIsFromEarlier() {
        return isFromEarlier_;
      }
      /**
       * <code>required bool isFromEarlier = 4;</code>
       * @param value The isFromEarlier to set.
       * @return This builder for chaining.
       */
      public Builder setIsFromEarlier(boolean value) {

        isFromEarlier_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>required bool isFromEarlier = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearIsFromEarlier() {
        bitField0_ = (bitField0_ & ~0x00000008);
        isFromEarlier_ = false;
        onChanged();
        return this;
      }
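      /*
       * Minimal sketch of the required-bool bookkeeping above: the value lives
       * in isFromEarlier_ while bit 0x00000008 of bitField0_ records whether it
       * was explicitly set.
       *
       *   SnapshotDiffReportListingProto.Builder report =
       *       SnapshotDiffReportListingProto.newBuilder();
       *   report.hasIsFromEarlier();       // false
       *   report.setIsFromEarlier(true);
       *   report.hasIsFromEarlier();       // true
       *   report.clearIsFromEarlier();
       *   report.getIsFromEarlier();       // false again (field default)
       */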

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto cursor_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder> cursorBuilder_;
      /**
       * <code>optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5;</code>
       * @return Whether the cursor field is set.
       */
      public boolean hasCursor() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5;</code>
       * @return The cursor.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto getCursor() {
        if (cursorBuilder_ == null) {
          return cursor_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance() : cursor_;
        } else {
          return cursorBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5;</code>
       */
      public Builder setCursor(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto value) {
        if (cursorBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          cursor_ = value;
        } else {
          cursorBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5;</code>
       */
      public Builder setCursor(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder builderForValue) {
        if (cursorBuilder_ == null) {
          cursor_ = builderForValue.build();
        } else {
          cursorBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5;</code>
       */
      public Builder mergeCursor(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto value) {
        if (cursorBuilder_ == null) {
          if (((bitField0_ & 0x00000010) != 0) &&
            cursor_ != null &&
            cursor_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance()) {
            getCursorBuilder().mergeFrom(value);
          } else {
            cursor_ = value;
          }
        } else {
          cursorBuilder_.mergeFrom(value);
        }
        if (cursor_ != null) {
          bitField0_ |= 0x00000010;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5;</code>
       */
      public Builder clearCursor() {
        bitField0_ = (bitField0_ & ~0x00000010);
        cursor_ = null;
        if (cursorBuilder_ != null) {
          cursorBuilder_.dispose();
          cursorBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder getCursorBuilder() {
        bitField0_ |= 0x00000010;
        onChanged();
        return getCursorFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder getCursorOrBuilder() {
        if (cursorBuilder_ != null) {
          return cursorBuilder_.getMessageOrBuilder();
        } else {
          return cursor_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.getDefaultInstance() : cursor_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.SnapshotDiffReportCursorProto cursor = 5;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder> 
          getCursorFieldBuilder() {
        if (cursorBuilder_ == null) {
          cursorBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportCursorProtoOrBuilder>(
                  getCursor(),
                  getParentForChildren(),
                  isClean());
          cursor_ = null;
        }
        return cursorBuilder_;
      }
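      /*
       * Minimal sketch for the optional cursor message field. getCursor() never
       * returns null (it falls back to the default instance), so hasCursor() is
       * the presence check; getCursorBuilder() forces the SingleFieldBuilderV3
       * path for in-place editing, and mergeCursor() field-merges into any value
       * already present.
       *
       *   SnapshotDiffReportListingProto.Builder report =
       *       SnapshotDiffReportListingProto.newBuilder();
       *   report.hasCursor();              // false
       *   report.getCursor();              // default instance, not null
       *   SnapshotDiffReportCursorProto.Builder cursor = report.getCursorBuilder();
       *   // ... populate 'cursor' through its generated setters ...
       *   report.hasCursor();              // true
       */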
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotDiffReportListingProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotDiffReportListingProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotDiffReportListingProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<SnapshotDiffReportListingProto>() {
      @java.lang.Override
      public SnapshotDiffReportListingProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotDiffReportListingProto> parser() {
      return PARSER;
    }
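    /*
     * Minimal round-trip sketch. toByteArray() comes from the protobuf runtime,
     * and parser().parseFrom(byte[]) routes through the PARSER instance above,
     * which simply delegates to Builder.mergeFrom.
     *
     *   SnapshotDiffReportListingProto report =
     *       SnapshotDiffReportListingProto.newBuilder()
     *           .setIsFromEarlier(true)    // the only required field of this message
     *           .build();
     *   byte[] wire = report.toByteArray();
     *   SnapshotDiffReportListingProto parsed =
     *       SnapshotDiffReportListingProto.parser().parseFrom(wire);
     */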

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotDiffReportListingProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportListingProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

  public interface BlockProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.BlockProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required uint64 blockId = 1;</code>
     * @return Whether the blockId field is set.
     */
    boolean hasBlockId();
    /**
     * <code>required uint64 blockId = 1;</code>
     * @return The blockId.
     */
    long getBlockId();

    /**
     * <code>required uint64 genStamp = 2;</code>
     * @return Whether the genStamp field is set.
     */
    boolean hasGenStamp();
    /**
     * <code>required uint64 genStamp = 2;</code>
     * @return The genStamp.
     */
    long getGenStamp();

    /**
     * <code>optional uint64 numBytes = 3 [default = 0];</code>
     * @return Whether the numBytes field is set.
     */
    boolean hasNumBytes();
    /**
     * <code>optional uint64 numBytes = 3 [default = 0];</code>
     * @return The numBytes.
     */
    long getNumBytes();
  }
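  /*
   * Minimal sketch: BlockProtoOrBuilder lets read-only helpers accept either a
   * built BlockProto or an in-progress BlockProto.Builder, since both implement
   * it. The helper name below is hypothetical.
   *
   *   // -1 distinguishes "unset" from an explicit zero length
   *   static long lengthOrUnknown(HdfsProtos.BlockProtoOrBuilder b) {
   *     return b.hasNumBytes() ? b.getNumBytes() : -1L;
   *   }
   */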
  /**
   * <pre>
   **
   * Block information
   *
   * Please be wary of adding additional fields here, since INodeFiles
   * need to fit in PB's default max message size of 64MB.
   * We restrict the max # of blocks per file
   * (dfs.namenode.fs-limits.max-blocks-per-file), but it's better
   * to avoid changing this.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.BlockProto}
   */
  public static final class BlockProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.BlockProto)
      BlockProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use BlockProto.newBuilder() to construct.
    private BlockProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private BlockProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new BlockProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder.class);
    }

    private int bitField0_;
    public static final int BLOCKID_FIELD_NUMBER = 1;
    private long blockId_ = 0L;
    /**
     * <code>required uint64 blockId = 1;</code>
     * @return Whether the blockId field is set.
     */
    @java.lang.Override
    public boolean hasBlockId() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required uint64 blockId = 1;</code>
     * @return The blockId.
     */
    @java.lang.Override
    public long getBlockId() {
      return blockId_;
    }

    public static final int GENSTAMP_FIELD_NUMBER = 2;
    private long genStamp_ = 0L;
    /**
     * <code>required uint64 genStamp = 2;</code>
     * @return Whether the genStamp field is set.
     */
    @java.lang.Override
    public boolean hasGenStamp() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required uint64 genStamp = 2;</code>
     * @return The genStamp.
     */
    @java.lang.Override
    public long getGenStamp() {
      return genStamp_;
    }

    public static final int NUMBYTES_FIELD_NUMBER = 3;
    private long numBytes_ = 0L;
    /**
     * <code>optional uint64 numBytes = 3 [default = 0];</code>
     * @return Whether the numBytes field is set.
     */
    @java.lang.Override
    public boolean hasNumBytes() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional uint64 numBytes = 3 [default = 0];</code>
     * @return The numBytes.
     */
    @java.lang.Override
    public long getNumBytes() {
      return numBytes_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasBlockId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasGenStamp()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
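    /*
     * Minimal sketch of what the required-field check above means for callers:
     * Builder.build() refuses messages that fail isInitialized(), while
     * buildPartial() does not.
     *
     *   BlockProto.Builder b = BlockProto.newBuilder().setBlockId(1L);
     *   b.isInitialized();     // false: required genStamp is unset
     *   b.buildPartial();      // succeeds, but the result is not initialized
     *   // b.build();          // would throw UninitializedMessageException
     *   BlockProto block = b.setGenStamp(1001L).build();  // ok; numBytes defaults to 0
     */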

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt64(1, blockId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(2, genStamp_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeUInt64(3, numBytes_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(1, blockId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(2, genStamp_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(3, numBytes_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }
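    /*
     * Worked size example for the varint arithmetic above: every field here is a
     * uint64, so each set field costs one tag byte ((fieldNumber << 3) | wireType,
     * wireType 0 = varint) plus a varint payload of 1..10 bytes.
     *
     *   blockId  = 1    -> tag 0x08 + 1-byte varint             = 2 bytes
     *   genStamp = 1000 -> tag 0x10 + 2-byte varint (0xE8 0x07) = 3 bytes
     *   numBytes unset  -> nothing written                      = 0 bytes
     *                                               total         5 bytes
     */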

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto) obj;

      if (hasBlockId() != other.hasBlockId()) return false;
      if (hasBlockId()) {
        if (getBlockId()
            != other.getBlockId()) return false;
      }
      if (hasGenStamp() != other.hasGenStamp()) return false;
      if (hasGenStamp()) {
        if (getGenStamp()
            != other.getGenStamp()) return false;
      }
      if (hasNumBytes() != other.hasNumBytes()) return false;
      if (hasNumBytes()) {
        if (getNumBytes()
            != other.getNumBytes()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasBlockId()) {
        hash = (37 * hash) + BLOCKID_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getBlockId());
      }
      if (hasGenStamp()) {
        hash = (37 * hash) + GENSTAMP_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getGenStamp());
      }
      if (hasNumBytes()) {
        hash = (37 * hash) + NUMBYTES_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getNumBytes());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
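    /*
     * Minimal sketch: the delimited variants above prefix each message with its
     * length, which is what lets several BlockProtos share one stream.
     * writeDelimitedTo() comes from the protobuf runtime, and parseDelimitedFrom()
     * returns null once the stream is exhausted. 'block', 'out' and 'in' are
     * assumed to exist.
     *
     *   block.writeDelimitedTo(out);
     *   BlockProto next = BlockProto.parseDelimitedFrom(in);
     *   while (next != null) {
     *     // ... use 'next' ...
     *     next = BlockProto.parseDelimitedFrom(in);
     *   }
     */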

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Block information
     *
     * Please be wary of adding additional fields here, since INodeFiles
     * need to fit in PB's default max message size of 64MB.
     * We restrict the max # of blocks per file
     * (dfs.namenode.fs-limits.max-blocks-per-file), but it's better
     * to avoid changing this.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.BlockProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.BlockProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        blockId_ = 0L;
        genStamp_ = 0L;
        numBytes_ = 0L;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.blockId_ = blockId_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.genStamp_ = genStamp_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.numBytes_ = numBytes_;
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }
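      /*
       * Presence-bit sketch for buildPartial0 above: within this Builder,
       * bit 0x00000001 of bitField0_ tracks blockId, 0x00000002 genStamp and
       * 0x00000004 numBytes, and only set fields are copied into the message.
       *
       *   BlockProto p = BlockProto.newBuilder().setNumBytes(512L).buildPartial();
       *   p.hasNumBytes();  // true  (bit 0x00000004 carried over)
       *   p.hasBlockId();   // false (bit 0x00000001 was never set)
       */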

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance()) return this;
        if (other.hasBlockId()) {
          setBlockId(other.getBlockId());
        }
        if (other.hasGenStamp()) {
          setGenStamp(other.getGenStamp());
        }
        if (other.hasNumBytes()) {
          setNumBytes(other.getNumBytes());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }
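      /*
       * Merge-semantics sketch for mergeFrom(BlockProto) above: fields that are
       * set on the other message overwrite this builder's values, while unset
       * fields leave them alone.
       *
       *   BlockProto.Builder b = BlockProto.newBuilder().setBlockId(1L).setGenStamp(100L);
       *   b.mergeFrom(BlockProto.newBuilder().setGenStamp(101L).buildPartial());
       *   b.getBlockId();   // still 1: blockId was not set on the merged-in message
       *   b.getGenStamp();  // 101: set fields on the other message win
       */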

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasBlockId()) {
          return false;
        }
        if (!hasGenStamp()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                blockId_ = input.readUInt64();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 16: {
                genStamp_ = input.readUInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                numBytes_ = input.readUInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
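      // Wire-format note: each case label is the encoded tag,
      // (field_number << 3) | wire_type. blockId (field 1), genStamp (field 2)
      // and numBytes (field 3) are uint64 varints (wire type 0), giving tags
      // 8, 16 and 24. Tag 0 or an end-group tag terminates the loop; anything
      // else is preserved through parseUnknownField.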
      private int bitField0_;

      private long blockId_ ;
      /**
       * <code>required uint64 blockId = 1;</code>
       * @return Whether the blockId field is set.
       */
      @java.lang.Override
      public boolean hasBlockId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required uint64 blockId = 1;</code>
       * @return The blockId.
       */
      @java.lang.Override
      public long getBlockId() {
        return blockId_;
      }
      /**
       * <code>required uint64 blockId = 1;</code>
       * @param value The blockId to set.
       * @return This builder for chaining.
       */
      public Builder setBlockId(long value) {
        blockId_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 blockId = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearBlockId() {
        bitField0_ = (bitField0_ & ~0x00000001);
        blockId_ = 0L;
        onChanged();
        return this;
      }

      private long genStamp_ ;
      /**
       * <code>required uint64 genStamp = 2;</code>
       * @return Whether the genStamp field is set.
       */
      @java.lang.Override
      public boolean hasGenStamp() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required uint64 genStamp = 2;</code>
       * @return The genStamp.
       */
      @java.lang.Override
      public long getGenStamp() {
        return genStamp_;
      }
      /**
       * <code>required uint64 genStamp = 2;</code>
       * @param value The genStamp to set.
       * @return This builder for chaining.
       */
      public Builder setGenStamp(long value) {
        genStamp_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 genStamp = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearGenStamp() {
        bitField0_ = (bitField0_ & ~0x00000002);
        genStamp_ = 0L;
        onChanged();
        return this;
      }

      private long numBytes_ ;
      /**
       * <code>optional uint64 numBytes = 3 [default = 0];</code>
       * @return Whether the numBytes field is set.
       */
      @java.lang.Override
      public boolean hasNumBytes() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional uint64 numBytes = 3 [default = 0];</code>
       * @return The numBytes.
       */
      @java.lang.Override
      public long getNumBytes() {
        return numBytes_;
      }
      /**
       * <code>optional uint64 numBytes = 3 [default = 0];</code>
       * @param value The numBytes to set.
       * @return This builder for chaining.
       */
      public Builder setNumBytes(long value) {
        numBytes_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 numBytes = 3 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearNumBytes() {
        bitField0_ = (bitField0_ & ~0x00000004);
        numBytes_ = 0L;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<BlockProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<BlockProto>() {
      @java.lang.Override
      public BlockProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };
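    // PARSER is deprecated in favour of parser()/getParserForType(), but the
    // behaviour is the same: parsePartialFrom merges from the stream and, if
    // parsing fails midway, attaches the partially built message to the thrown
    // InvalidProtocolBufferException so callers can still inspect what was read.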

    public static org.apache.hadoop.thirdparty.protobuf.Parser<BlockProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<BlockProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
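  /*
   * A minimal usage sketch for the generated BlockProto API (the values are
   * illustrative placeholders, not taken from a real HDFS cluster):
   *
   *   HdfsProtos.BlockProto block = HdfsProtos.BlockProto.newBuilder()
   *       .setBlockId(1073741825L)   // required
   *       .setGenStamp(1001L)        // required
   *       .setNumBytes(134217728L)   // optional, defaults to 0
   *       .build();
   *   byte[] wire = block.toByteArray();
   *   HdfsProtos.BlockProto copy = HdfsProtos.BlockProto.parseFrom(wire);
   */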

  public interface SnapshotInfoProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.SnapshotInfoProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required string snapshotName = 1;</code>
     * @return Whether the snapshotName field is set.
     */
    boolean hasSnapshotName();
    /**
     * <code>required string snapshotName = 1;</code>
     * @return The snapshotName.
     */
    java.lang.String getSnapshotName();
    /**
     * <code>required string snapshotName = 1;</code>
     * @return The bytes for snapshotName.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getSnapshotNameBytes();

    /**
     * <code>required string snapshotRoot = 2;</code>
     * @return Whether the snapshotRoot field is set.
     */
    boolean hasSnapshotRoot();
    /**
     * <code>required string snapshotRoot = 2;</code>
     * @return The snapshotRoot.
     */
    java.lang.String getSnapshotRoot();
    /**
     * <code>required string snapshotRoot = 2;</code>
     * @return The bytes for snapshotRoot.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getSnapshotRootBytes();

    /**
     * <code>required .hadoop.hdfs.FsPermissionProto permission = 3;</code>
     * @return Whether the permission field is set.
     */
    boolean hasPermission();
    /**
     * <code>required .hadoop.hdfs.FsPermissionProto permission = 3;</code>
     * @return The permission.
     */
    org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission();
    /**
     * <code>required .hadoop.hdfs.FsPermissionProto permission = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder();

    /**
     * <code>required string owner = 4;</code>
     * @return Whether the owner field is set.
     */
    boolean hasOwner();
    /**
     * <code>required string owner = 4;</code>
     * @return The owner.
     */
    java.lang.String getOwner();
    /**
     * <code>required string owner = 4;</code>
     * @return The bytes for owner.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getOwnerBytes();

    /**
     * <code>required string group = 5;</code>
     * @return Whether the group field is set.
     */
    boolean hasGroup();
    /**
     * <code>required string group = 5;</code>
     * @return The group.
     */
    java.lang.String getGroup();
    /**
     * <code>required string group = 5;</code>
     * @return The bytes for group.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getGroupBytes();

    /**
     * <pre>
     * TODO: do we need access time?
     * </pre>
     *
     * <code>required string createTime = 6;</code>
     * @return Whether the createTime field is set.
     */
    boolean hasCreateTime();
    /**
     * <pre>
     * TODO: do we need access time?
     * </pre>
     *
     * <code>required string createTime = 6;</code>
     * @return The createTime.
     */
    java.lang.String getCreateTime();
    /**
     * <pre>
     * TODO: do we need access time?
     * </pre>
     *
     * <code>required string createTime = 6;</code>
     * @return The bytes for createTime.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getCreateTimeBytes();
  }
  /**
   * <pre>
   **
   * Information related to a snapshot
   * TODO: add more information
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.SnapshotInfoProto}
   */
  public static final class SnapshotInfoProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.SnapshotInfoProto)
      SnapshotInfoProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use SnapshotInfoProto.newBuilder() to construct.
    private SnapshotInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private SnapshotInfoProto() {
      snapshotName_ = "";
      snapshotRoot_ = "";
      owner_ = "";
      group_ = "";
      createTime_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new SnapshotInfoProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotInfoProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.Builder.class);
    }

    private int bitField0_;
    public static final int SNAPSHOTNAME_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object snapshotName_ = "";
    /**
     * <code>required string snapshotName = 1;</code>
     * @return Whether the snapshotName field is set.
     */
    @java.lang.Override
    public boolean hasSnapshotName() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required string snapshotName = 1;</code>
     * @return The snapshotName.
     */
    @java.lang.Override
    public java.lang.String getSnapshotName() {
      java.lang.Object ref = snapshotName_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          snapshotName_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string snapshotName = 1;</code>
     * @return The bytes for snapshotName.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getSnapshotNameBytes() {
      java.lang.Object ref = snapshotName_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        snapshotName_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }
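    // The string fields in this message are stored as java.lang.Object and
    // converted lazily: a value decoded from the wire starts out as a
    // ByteString and is cached as a String on the first get*() call (only if
    // it is valid UTF-8), while get*Bytes() caches the reverse conversion.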

    public static final int SNAPSHOTROOT_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object snapshotRoot_ = "";
    /**
     * <code>required string snapshotRoot = 2;</code>
     * @return Whether the snapshotRoot field is set.
     */
    @java.lang.Override
    public boolean hasSnapshotRoot() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required string snapshotRoot = 2;</code>
     * @return The snapshotRoot.
     */
    @java.lang.Override
    public java.lang.String getSnapshotRoot() {
      java.lang.Object ref = snapshotRoot_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          snapshotRoot_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string snapshotRoot = 2;</code>
     * @return The bytes for snapshotRoot.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getSnapshotRootBytes() {
      java.lang.Object ref = snapshotRoot_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        snapshotRoot_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int PERMISSION_FIELD_NUMBER = 3;
    private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto permission_;
    /**
     * <code>required .hadoop.hdfs.FsPermissionProto permission = 3;</code>
     * @return Whether the permission field is set.
     */
    @java.lang.Override
    public boolean hasPermission() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.FsPermissionProto permission = 3;</code>
     * @return The permission.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission() {
      return permission_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_;
    }
    /**
     * <code>required .hadoop.hdfs.FsPermissionProto permission = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() {
      return permission_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_;
    }

    public static final int OWNER_FIELD_NUMBER = 4;
    @SuppressWarnings("serial")
    private volatile java.lang.Object owner_ = "";
    /**
     * <code>required string owner = 4;</code>
     * @return Whether the owner field is set.
     */
    @java.lang.Override
    public boolean hasOwner() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>required string owner = 4;</code>
     * @return The owner.
     */
    @java.lang.Override
    public java.lang.String getOwner() {
      java.lang.Object ref = owner_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          owner_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string owner = 4;</code>
     * @return The bytes for owner.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getOwnerBytes() {
      java.lang.Object ref = owner_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        owner_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int GROUP_FIELD_NUMBER = 5;
    @SuppressWarnings("serial")
    private volatile java.lang.Object group_ = "";
    /**
     * <code>required string group = 5;</code>
     * @return Whether the group field is set.
     */
    @java.lang.Override
    public boolean hasGroup() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>required string group = 5;</code>
     * @return The group.
     */
    @java.lang.Override
    public java.lang.String getGroup() {
      java.lang.Object ref = group_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          group_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string group = 5;</code>
     * @return The bytes for group.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getGroupBytes() {
      java.lang.Object ref = group_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        group_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int CREATETIME_FIELD_NUMBER = 6;
    @SuppressWarnings("serial")
    private volatile java.lang.Object createTime_ = "";
    /**
     * <pre>
     * TODO: do we need access time?
     * </pre>
     *
     * <code>required string createTime = 6;</code>
     * @return Whether the createTime field is set.
     */
    @java.lang.Override
    public boolean hasCreateTime() {
      return ((bitField0_ & 0x00000020) != 0);
    }
    /**
     * <pre>
     * TODO: do we need access time?
     * </pre>
     *
     * <code>required string createTime = 6;</code>
     * @return The createTime.
     */
    @java.lang.Override
    public java.lang.String getCreateTime() {
      java.lang.Object ref = createTime_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          createTime_ = s;
        }
        return s;
      }
    }
    /**
     * <pre>
     * TODO: do we need access time?
     * </pre>
     *
     * <code>required string createTime = 6;</code>
     * @return The bytes for createTime.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getCreateTimeBytes() {
      java.lang.Object ref = createTime_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        createTime_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasSnapshotName()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSnapshotRoot()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasPermission()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasOwner()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasGroup()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasCreateTime()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getPermission().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
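    // All six fields are declared "required" in hdfs.proto, so isInitialized
    // also recurses into the nested FsPermissionProto. The result is memoized
    // in memoizedIsInitialized (-1 = unknown, 0 = false, 1 = true), keeping
    // repeated checks cheap.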

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, snapshotName_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, snapshotRoot_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeMessage(3, getPermission());
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 4, owner_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 5, group_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 6, createTime_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, snapshotName_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, snapshotRoot_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(3, getPermission());
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(4, owner_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(5, group_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(6, createTime_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }
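    // getSerializedSize is memoized in memoizedSize. String fields use
    // computeStringSize, the nested permission message contributes its own
    // length-prefixed size via computeMessageSize, and unknown fields are
    // counted as well so data from newer schema versions round-trips intact.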

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto) obj;

      if (hasSnapshotName() != other.hasSnapshotName()) return false;
      if (hasSnapshotName()) {
        if (!getSnapshotName()
            .equals(other.getSnapshotName())) return false;
      }
      if (hasSnapshotRoot() != other.hasSnapshotRoot()) return false;
      if (hasSnapshotRoot()) {
        if (!getSnapshotRoot()
            .equals(other.getSnapshotRoot())) return false;
      }
      if (hasPermission() != other.hasPermission()) return false;
      if (hasPermission()) {
        if (!getPermission()
            .equals(other.getPermission())) return false;
      }
      if (hasOwner() != other.hasOwner()) return false;
      if (hasOwner()) {
        if (!getOwner()
            .equals(other.getOwner())) return false;
      }
      if (hasGroup() != other.hasGroup()) return false;
      if (hasGroup()) {
        if (!getGroup()
            .equals(other.getGroup())) return false;
      }
      if (hasCreateTime() != other.hasCreateTime()) return false;
      if (hasCreateTime()) {
        if (!getCreateTime()
            .equals(other.getCreateTime())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasSnapshotName()) {
        hash = (37 * hash) + SNAPSHOTNAME_FIELD_NUMBER;
        hash = (53 * hash) + getSnapshotName().hashCode();
      }
      if (hasSnapshotRoot()) {
        hash = (37 * hash) + SNAPSHOTROOT_FIELD_NUMBER;
        hash = (53 * hash) + getSnapshotRoot().hashCode();
      }
      if (hasPermission()) {
        hash = (37 * hash) + PERMISSION_FIELD_NUMBER;
        hash = (53 * hash) + getPermission().hashCode();
      }
      if (hasOwner()) {
        hash = (37 * hash) + OWNER_FIELD_NUMBER;
        hash = (53 * hash) + getOwner().hashCode();
      }
      if (hasGroup()) {
        hash = (37 * hash) + GROUP_FIELD_NUMBER;
        hash = (53 * hash) + getGroup().hashCode();
      }
      if (hasCreateTime()) {
        hash = (37 * hash) + CREATETIME_FIELD_NUMBER;
        hash = (53 * hash) + getCreateTime().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
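    /*
     * A minimal parsing sketch ("bytes" is a placeholder byte[] assumed to
     * hold a serialized SnapshotInfoProto):
     *
     *   HdfsProtos.SnapshotInfoProto info =
     *       HdfsProtos.SnapshotInfoProto.parseFrom(bytes);
     *   if (info.hasPermission()) {
     *     System.out.println(info.getSnapshotName() + " owned by " + info.getOwner());
     *   }
     *
     * The InputStream overloads consume the whole stream, while
     * parseDelimitedFrom first reads a varint length prefix, i.e. the form
     * written by writeDelimitedTo.
     */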

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Information related to a snapshot
     * TODO: add more information
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.SnapshotInfoProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.SnapshotInfoProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotInfoProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getPermissionFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        snapshotName_ = "";
        snapshotRoot_ = "";
        permission_ = null;
        if (permissionBuilder_ != null) {
          permissionBuilder_.dispose();
          permissionBuilder_ = null;
        }
        owner_ = "";
        group_ = "";
        createTime_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.snapshotName_ = snapshotName_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.snapshotRoot_ = snapshotRoot_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.permission_ = permissionBuilder_ == null
              ? permission_
              : permissionBuilder_.build();
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.owner_ = owner_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.group_ = group_;
          to_bitField0_ |= 0x00000010;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.createTime_ = createTime_;
          to_bitField0_ |= 0x00000020;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto.getDefaultInstance()) return this;
        if (other.hasSnapshotName()) {
          snapshotName_ = other.snapshotName_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasSnapshotRoot()) {
          snapshotRoot_ = other.snapshotRoot_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        if (other.hasPermission()) {
          mergePermission(other.getPermission());
        }
        if (other.hasOwner()) {
          owner_ = other.owner_;
          bitField0_ |= 0x00000008;
          onChanged();
        }
        if (other.hasGroup()) {
          group_ = other.group_;
          bitField0_ |= 0x00000010;
          onChanged();
        }
        if (other.hasCreateTime()) {
          createTime_ = other.createTime_;
          bitField0_ |= 0x00000020;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }
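      // Merging from another SnapshotInfoProto overwrites the string fields
      // that are set on "other" and recursively merges the nested permission
      // message via mergePermission; fields unset on "other" are left alone,
      // and its unknown fields are appended to this builder's.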

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasSnapshotName()) {
          return false;
        }
        if (!hasSnapshotRoot()) {
          return false;
        }
        if (!hasPermission()) {
          return false;
        }
        if (!hasOwner()) {
          return false;
        }
        if (!hasGroup()) {
          return false;
        }
        if (!hasCreateTime()) {
          return false;
        }
        if (!getPermission().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                snapshotName_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                snapshotRoot_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 26: {
                input.readMessage(
                    getPermissionFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              case 34: {
                owner_ = input.readBytes();
                bitField0_ |= 0x00000008;
                break;
              } // case 34
              case 42: {
                group_ = input.readBytes();
                bitField0_ |= 0x00000010;
                break;
              } // case 42
              case 50: {
                createTime_ = input.readBytes();
                bitField0_ |= 0x00000020;
                break;
              } // case 50
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object snapshotName_ = "";
      /**
       * <code>required string snapshotName = 1;</code>
       * @return Whether the snapshotName field is set.
       */
      public boolean hasSnapshotName() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required string snapshotName = 1;</code>
       * @return The snapshotName.
       */
      public java.lang.String getSnapshotName() {
        java.lang.Object ref = snapshotName_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            snapshotName_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string snapshotName = 1;</code>
       * @return The bytes for snapshotName.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getSnapshotNameBytes() {
        java.lang.Object ref = snapshotName_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          snapshotName_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string snapshotName = 1;</code>
       * @param value The snapshotName to set.
       * @return This builder for chaining.
       */
      public Builder setSnapshotName(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        snapshotName_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required string snapshotName = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearSnapshotName() {
        snapshotName_ = getDefaultInstance().getSnapshotName();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>required string snapshotName = 1;</code>
       * @param value The bytes for snapshotName to set.
       * @return This builder for chaining.
       */
      public Builder setSnapshotNameBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        snapshotName_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private java.lang.Object snapshotRoot_ = "";
      /**
       * <code>required string snapshotRoot = 2;</code>
       * @return Whether the snapshotRoot field is set.
       */
      public boolean hasSnapshotRoot() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required string snapshotRoot = 2;</code>
       * @return The snapshotRoot.
       */
      public java.lang.String getSnapshotRoot() {
        java.lang.Object ref = snapshotRoot_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            snapshotRoot_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string snapshotRoot = 2;</code>
       * @return The bytes for snapshotRoot.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getSnapshotRootBytes() {
        java.lang.Object ref = snapshotRoot_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          snapshotRoot_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string snapshotRoot = 2;</code>
       * @param value The snapshotRoot to set.
       * @return This builder for chaining.
       */
      public Builder setSnapshotRoot(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        snapshotRoot_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required string snapshotRoot = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearSnapshotRoot() {
        snapshotRoot_ = getDefaultInstance().getSnapshotRoot();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <code>required string snapshotRoot = 2;</code>
       * @param value The bytes for snapshotRoot to set.
       * @return This builder for chaining.
       */
      public Builder setSnapshotRootBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        snapshotRoot_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto permission_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> permissionBuilder_;
      /**
       * <code>required .hadoop.hdfs.FsPermissionProto permission = 3;</code>
       * @return Whether the permission field is set.
       */
      public boolean hasPermission() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.FsPermissionProto permission = 3;</code>
       * @return The permission.
       */
      public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto getPermission() {
        if (permissionBuilder_ == null) {
          return permission_ == null ? org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_;
        } else {
          return permissionBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.FsPermissionProto permission = 3;</code>
       */
      public Builder setPermission(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) {
        if (permissionBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          permission_ = value;
        } else {
          permissionBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.FsPermissionProto permission = 3;</code>
       */
      public Builder setPermission(
          org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder builderForValue) {
        if (permissionBuilder_ == null) {
          permission_ = builderForValue.build();
        } else {
          permissionBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.FsPermissionProto permission = 3;</code>
       */
      public Builder mergePermission(org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto value) {
        if (permissionBuilder_ == null) {
          if (((bitField0_ & 0x00000004) != 0) &&
            permission_ != null &&
            permission_ != org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance()) {
            getPermissionBuilder().mergeFrom(value);
          } else {
            permission_ = value;
          }
        } else {
          permissionBuilder_.mergeFrom(value);
        }
        if (permission_ != null) {
          bitField0_ |= 0x00000004;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.FsPermissionProto permission = 3;</code>
       */
      public Builder clearPermission() {
        bitField0_ = (bitField0_ & ~0x00000004);
        permission_ = null;
        if (permissionBuilder_ != null) {
          permissionBuilder_.dispose();
          permissionBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.FsPermissionProto permission = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder getPermissionBuilder() {
        bitField0_ |= 0x00000004;
        onChanged();
        return getPermissionFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.FsPermissionProto permission = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder getPermissionOrBuilder() {
        if (permissionBuilder_ != null) {
          return permissionBuilder_.getMessageOrBuilder();
        } else {
          return permission_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.getDefaultInstance() : permission_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.FsPermissionProto permission = 3;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder> 
          getPermissionFieldBuilder() {
        if (permissionBuilder_ == null) {
          permissionBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.AclProtos.FsPermissionProtoOrBuilder>(
                  getPermission(),
                  getParentForChildren(),
                  isClean());
          permission_ = null;
        }
        return permissionBuilder_;
      }
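      // SingleFieldBuilderV3 is created lazily: once a builder view of the
      // nested permission message is requested, ownership of the current value
      // moves into permissionBuilder_ and the plain permission_ reference is
      // nulled, so later reads and buildPartial0 go through the field builder.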

      private java.lang.Object owner_ = "";
      /**
       * <code>required string owner = 4;</code>
       * @return Whether the owner field is set.
       */
      public boolean hasOwner() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>required string owner = 4;</code>
       * @return The owner.
       */
      public java.lang.String getOwner() {
        java.lang.Object ref = owner_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            owner_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string owner = 4;</code>
       * @return The bytes for owner.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getOwnerBytes() {
        java.lang.Object ref = owner_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          owner_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string owner = 4;</code>
       * @param value The owner to set.
       * @return This builder for chaining.
       */
      public Builder setOwner(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        owner_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>required string owner = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearOwner() {
        owner_ = getDefaultInstance().getOwner();
        bitField0_ = (bitField0_ & ~0x00000008);
        onChanged();
        return this;
      }
      /**
       * <code>required string owner = 4;</code>
       * @param value The bytes for owner to set.
       * @return This builder for chaining.
       */
      public Builder setOwnerBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        owner_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }

      private java.lang.Object group_ = "";
      /**
       * <code>required string group = 5;</code>
       * @return Whether the group field is set.
       */
      public boolean hasGroup() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>required string group = 5;</code>
       * @return The group.
       */
      public java.lang.String getGroup() {
        java.lang.Object ref = group_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            group_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string group = 5;</code>
       * @return The bytes for group.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getGroupBytes() {
        java.lang.Object ref = group_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          group_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string group = 5;</code>
       * @param value The group to set.
       * @return This builder for chaining.
       */
      public Builder setGroup(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        group_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>required string group = 5;</code>
       * @return This builder for chaining.
       */
      public Builder clearGroup() {
        group_ = getDefaultInstance().getGroup();
        bitField0_ = (bitField0_ & ~0x00000010);
        onChanged();
        return this;
      }
      /**
       * <code>required string group = 5;</code>
       * @param value The bytes for group to set.
       * @return This builder for chaining.
       */
      public Builder setGroupBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        group_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }

      private java.lang.Object createTime_ = "";
      /**
       * <pre>
       * TODO: do we need access time?
       * </pre>
       *
       * <code>required string createTime = 6;</code>
       * @return Whether the createTime field is set.
       */
      public boolean hasCreateTime() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <pre>
       * TODO: do we need access time?
       * </pre>
       *
       * <code>required string createTime = 6;</code>
       * @return The createTime.
       */
      public java.lang.String getCreateTime() {
        java.lang.Object ref = createTime_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            createTime_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <pre>
       * TODO: do we need access time?
       * </pre>
       *
       * <code>required string createTime = 6;</code>
       * @return The bytes for createTime.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getCreateTimeBytes() {
        java.lang.Object ref = createTime_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          createTime_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <pre>
       * TODO: do we need access time?
       * </pre>
       *
       * <code>required string createTime = 6;</code>
       * @param value The createTime to set.
       * @return This builder for chaining.
       */
      public Builder setCreateTime(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        createTime_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * TODO: do we need access time?
       * </pre>
       *
       * <code>required string createTime = 6;</code>
       * @return This builder for chaining.
       */
      public Builder clearCreateTime() {
        createTime_ = getDefaultInstance().getCreateTime();
        bitField0_ = (bitField0_ & ~0x00000020);
        onChanged();
        return this;
      }
      /**
       * <pre>
       * TODO: do we need access time?
       * </pre>
       *
       * <code>required string createTime = 6;</code>
       * @param value The bytes for createTime to set.
       * @return This builder for chaining.
       */
      public Builder setCreateTimeBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        createTime_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SnapshotInfoProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.SnapshotInfoProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotInfoProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<SnapshotInfoProto>() {
      @java.lang.Override
      public SnapshotInfoProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotInfoProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<SnapshotInfoProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotInfoProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
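
  /*
   * Illustrative usage sketch (not part of the generated message code): it
   * exercises only the SnapshotInfoProto builder accessors shown above
   * (owner, group, createTime). The message declares further required fields,
   * so buildPartial() is used here, and the field values are hypothetical
   * placeholders.
   *
   *   HdfsProtos.SnapshotInfoProto partial =
   *       HdfsProtos.SnapshotInfoProto.newBuilder()
   *           .setOwner("hdfs")
   *           .setGroup("supergroup")
   *           .setCreateTime("2024-01-01T00:00:00Z")
   *           .buildPartial();                 // build() would throw until every required field is set
   *   boolean hasOwner = partial.hasOwner();   // true
   */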

  public interface RollingUpgradeStatusProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.RollingUpgradeStatusProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required string blockPoolId = 1;</code>
     * @return Whether the blockPoolId field is set.
     */
    boolean hasBlockPoolId();
    /**
     * <code>required string blockPoolId = 1;</code>
     * @return The blockPoolId.
     */
    java.lang.String getBlockPoolId();
    /**
     * <code>required string blockPoolId = 1;</code>
     * @return The bytes for blockPoolId.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getBlockPoolIdBytes();

    /**
     * <code>optional bool finalized = 2 [default = false];</code>
     * @return Whether the finalized field is set.
     */
    boolean hasFinalized();
    /**
     * <code>optional bool finalized = 2 [default = false];</code>
     * @return The finalized.
     */
    boolean getFinalized();
  }
  /**
   * <pre>
   **
   * Rolling upgrade status
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.RollingUpgradeStatusProto}
   */
  public static final class RollingUpgradeStatusProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.RollingUpgradeStatusProto)
      RollingUpgradeStatusProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use RollingUpgradeStatusProto.newBuilder() to construct.
    private RollingUpgradeStatusProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private RollingUpgradeStatusProto() {
      blockPoolId_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new RollingUpgradeStatusProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RollingUpgradeStatusProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder.class);
    }

    private int bitField0_;
    public static final int BLOCKPOOLID_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object blockPoolId_ = "";
    /**
     * <code>required string blockPoolId = 1;</code>
     * @return Whether the blockPoolId field is set.
     */
    @java.lang.Override
    public boolean hasBlockPoolId() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required string blockPoolId = 1;</code>
     * @return The blockPoolId.
     */
    @java.lang.Override
    public java.lang.String getBlockPoolId() {
      java.lang.Object ref = blockPoolId_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          blockPoolId_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string blockPoolId = 1;</code>
     * @return The bytes for blockPoolId.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getBlockPoolIdBytes() {
      java.lang.Object ref = blockPoolId_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        blockPoolId_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int FINALIZED_FIELD_NUMBER = 2;
    private boolean finalized_ = false;
    /**
     * <code>optional bool finalized = 2 [default = false];</code>
     * @return Whether the finalized field is set.
     */
    @java.lang.Override
    public boolean hasFinalized() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional bool finalized = 2 [default = false];</code>
     * @return The finalized.
     */
    @java.lang.Override
    public boolean getFinalized() {
      return finalized_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasBlockPoolId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, blockPoolId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeBool(2, finalized_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, blockPoolId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(2, finalized_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto) obj;

      if (hasBlockPoolId() != other.hasBlockPoolId()) return false;
      if (hasBlockPoolId()) {
        if (!getBlockPoolId()
            .equals(other.getBlockPoolId())) return false;
      }
      if (hasFinalized() != other.hasFinalized()) return false;
      if (hasFinalized()) {
        if (getFinalized()
            != other.getFinalized()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasBlockPoolId()) {
        hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
        hash = (53 * hash) + getBlockPoolId().hashCode();
      }
      if (hasFinalized()) {
        hash = (37 * hash) + FINALIZED_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getFinalized());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Rolling upgrade status
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.RollingUpgradeStatusProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.RollingUpgradeStatusProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RollingUpgradeStatusProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        blockPoolId_ = "";
        finalized_ = false;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.blockPoolId_ = blockPoolId_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.finalized_ = finalized_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto.getDefaultInstance()) return this;
        if (other.hasBlockPoolId()) {
          blockPoolId_ = other.blockPoolId_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasFinalized()) {
          setFinalized(other.getFinalized());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasBlockPoolId()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                blockPoolId_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                finalized_ = input.readBool();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object blockPoolId_ = "";
      /**
       * <code>required string blockPoolId = 1;</code>
       * @return Whether the blockPoolId field is set.
       */
      public boolean hasBlockPoolId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required string blockPoolId = 1;</code>
       * @return The blockPoolId.
       */
      public java.lang.String getBlockPoolId() {
        java.lang.Object ref = blockPoolId_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            blockPoolId_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string blockPoolId = 1;</code>
       * @return The bytes for blockPoolId.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getBlockPoolIdBytes() {
        java.lang.Object ref = blockPoolId_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          blockPoolId_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string blockPoolId = 1;</code>
       * @param value The blockPoolId to set.
       * @return This builder for chaining.
       */
      public Builder setBlockPoolId(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        blockPoolId_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required string blockPoolId = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearBlockPoolId() {
        blockPoolId_ = getDefaultInstance().getBlockPoolId();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>required string blockPoolId = 1;</code>
       * @param value The bytes for blockPoolId to set.
       * @return This builder for chaining.
       */
      public Builder setBlockPoolIdBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        blockPoolId_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private boolean finalized_ ;
      /**
       * <code>optional bool finalized = 2 [default = false];</code>
       * @return Whether the finalized field is set.
       */
      @java.lang.Override
      public boolean hasFinalized() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional bool finalized = 2 [default = false];</code>
       * @return The finalized.
       */
      @java.lang.Override
      public boolean getFinalized() {
        return finalized_;
      }
      /**
       * <code>optional bool finalized = 2 [default = false];</code>
       * @param value The finalized to set.
       * @return This builder for chaining.
       */
      public Builder setFinalized(boolean value) {

        finalized_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional bool finalized = 2 [default = false];</code>
       * @return This builder for chaining.
       */
      public Builder clearFinalized() {
        bitField0_ = (bitField0_ & ~0x00000002);
        finalized_ = false;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RollingUpgradeStatusProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.RollingUpgradeStatusProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<RollingUpgradeStatusProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<RollingUpgradeStatusProto>() {
      @java.lang.Override
      public RollingUpgradeStatusProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<RollingUpgradeStatusProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<RollingUpgradeStatusProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
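
  /*
   * Illustrative usage sketch (not part of the generated message code): it
   * builds, serializes, and re-parses a RollingUpgradeStatusProto using the
   * builder and parseFrom methods defined above; the block pool id is a
   * hypothetical placeholder.
   *
   *   HdfsProtos.RollingUpgradeStatusProto status =
   *       HdfsProtos.RollingUpgradeStatusProto.newBuilder()
   *           .setBlockPoolId("BP-example-pool")   // required; build() throws if unset
   *           .setFinalized(true)
   *           .build();
   *   byte[] bytes = status.toByteArray();
   *   HdfsProtos.RollingUpgradeStatusProto parsed =
   *       HdfsProtos.RollingUpgradeStatusProto.parseFrom(bytes);
   */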

  public interface StorageUuidsProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.StorageUuidsProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>repeated string storageUuids = 1;</code>
     * @return A list containing the storageUuids.
     */
    java.util.List<java.lang.String>
        getStorageUuidsList();
    /**
     * <code>repeated string storageUuids = 1;</code>
     * @return The count of storageUuids.
     */
    int getStorageUuidsCount();
    /**
     * <code>repeated string storageUuids = 1;</code>
     * @param index The index of the element to return.
     * @return The storageUuids at the given index.
     */
    java.lang.String getStorageUuids(int index);
    /**
     * <code>repeated string storageUuids = 1;</code>
     * @param index The index of the value to return.
     * @return The bytes of the storageUuids at the given index.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getStorageUuidsBytes(int index);
  }
  /**
   * <pre>
   **
   * A list of storage IDs.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.StorageUuidsProto}
   */
  public static final class StorageUuidsProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.StorageUuidsProto)
      StorageUuidsProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use StorageUuidsProto.newBuilder() to construct.
    private StorageUuidsProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private StorageUuidsProto() {
      storageUuids_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new StorageUuidsProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.Builder.class);
    }

    public static final int STORAGEUUIDS_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList storageUuids_ =
        org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
    /**
     * <code>repeated string storageUuids = 1;</code>
     * @return A list containing the storageUuids.
     */
    public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
        getStorageUuidsList() {
      return storageUuids_;
    }
    /**
     * <code>repeated string storageUuids = 1;</code>
     * @return The count of storageUuids.
     */
    public int getStorageUuidsCount() {
      return storageUuids_.size();
    }
    /**
     * <code>repeated string storageUuids = 1;</code>
     * @param index The index of the element to return.
     * @return The storageUuids at the given index.
     */
    public java.lang.String getStorageUuids(int index) {
      return storageUuids_.get(index);
    }
    /**
     * <code>repeated string storageUuids = 1;</code>
     * @param index The index of the value to return.
     * @return The bytes of the storageUuids at the given index.
     */
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getStorageUuidsBytes(int index) {
      return storageUuids_.getByteString(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      for (int i = 0; i < storageUuids_.size(); i++) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, storageUuids_.getRaw(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      {
        int dataSize = 0;
        for (int i = 0; i < storageUuids_.size(); i++) {
          dataSize += computeStringSizeNoTag(storageUuids_.getRaw(i));
        }
        size += dataSize;
        size += 1 * getStorageUuidsList().size();
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto) obj;

      if (!getStorageUuidsList()
          .equals(other.getStorageUuidsList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (getStorageUuidsCount() > 0) {
        hash = (37 * hash) + STORAGEUUIDS_FIELD_NUMBER;
        hash = (53 * hash) + getStorageUuidsList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * A list of storage IDs.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.StorageUuidsProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.StorageUuidsProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        storageUuids_ =
            org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_StorageUuidsProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto result) {
        int from_bitField0_ = bitField0_;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          storageUuids_.makeImmutable();
          result.storageUuids_ = storageUuids_;
        }
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto.getDefaultInstance()) return this;
        if (!other.storageUuids_.isEmpty()) {
          if (storageUuids_.isEmpty()) {
            storageUuids_ = other.storageUuids_;
            bitField0_ |= 0x00000001;
          } else {
            ensureStorageUuidsIsMutable();
            storageUuids_.addAll(other.storageUuids_);
          }
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
                ensureStorageUuidsIsMutable();
                storageUuids_.add(bs);
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList storageUuids_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
      private void ensureStorageUuidsIsMutable() {
        if (!storageUuids_.isModifiable()) {
          storageUuids_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(storageUuids_);
        }
        bitField0_ |= 0x00000001;
      }
      /**
       * <code>repeated string storageUuids = 1;</code>
       * @return A list containing the storageUuids.
       */
      public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
          getStorageUuidsList() {
        storageUuids_.makeImmutable();
        return storageUuids_;
      }
      /**
       * <code>repeated string storageUuids = 1;</code>
       * @return The count of storageUuids.
       */
      public int getStorageUuidsCount() {
        return storageUuids_.size();
      }
      /**
       * <code>repeated string storageUuids = 1;</code>
       * @param index The index of the element to return.
       * @return The storageUuids at the given index.
       */
      public java.lang.String getStorageUuids(int index) {
        return storageUuids_.get(index);
      }
      /**
       * <code>repeated string storageUuids = 1;</code>
       * @param index The index of the value to return.
       * @return The bytes of the storageUuids at the given index.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getStorageUuidsBytes(int index) {
        return storageUuids_.getByteString(index);
      }
      /**
       * <code>repeated string storageUuids = 1;</code>
       * @param index The index to set the value at.
       * @param value The storageUuids to set.
       * @return This builder for chaining.
       */
      public Builder setStorageUuids(
          int index, java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureStorageUuidsIsMutable();
        storageUuids_.set(index, value);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string storageUuids = 1;</code>
       * @param value The storageUuids to add.
       * @return This builder for chaining.
       */
      public Builder addStorageUuids(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureStorageUuidsIsMutable();
        storageUuids_.add(value);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string storageUuids = 1;</code>
       * @param values The storageUuids to add.
       * @return This builder for chaining.
       */
      public Builder addAllStorageUuids(
          java.lang.Iterable<java.lang.String> values) {
        ensureStorageUuidsIsMutable();
        org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
            values, storageUuids_);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string storageUuids = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearStorageUuids() {
        storageUuids_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>repeated string storageUuids = 1;</code>
       * @param value The bytes of the storageUuids to add.
       * @return This builder for chaining.
       */
      public Builder addStorageUuidsBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        ensureStorageUuidsIsMutable();
        storageUuids_.add(value);
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StorageUuidsProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.StorageUuidsProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<StorageUuidsProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<StorageUuidsProto>() {
      @java.lang.Override
      public StorageUuidsProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<StorageUuidsProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<StorageUuidsProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

  public interface BlockTokenSecretProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.BlockTokenSecretProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>optional uint64 expiryDate = 1;</code>
     * @return Whether the expiryDate field is set.
     */
    boolean hasExpiryDate();
    /**
     * <code>optional uint64 expiryDate = 1;</code>
     * @return The expiryDate.
     */
    long getExpiryDate();

    /**
     * <code>optional uint32 keyId = 2;</code>
     * @return Whether the keyId field is set.
     */
    boolean hasKeyId();
    /**
     * <code>optional uint32 keyId = 2;</code>
     * @return The keyId.
     */
    int getKeyId();

    /**
     * <code>optional string userId = 3;</code>
     * @return Whether the userId field is set.
     */
    boolean hasUserId();
    /**
     * <code>optional string userId = 3;</code>
     * @return The userId.
     */
    java.lang.String getUserId();
    /**
     * <code>optional string userId = 3;</code>
     * @return The bytes for userId.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getUserIdBytes();

    /**
     * <code>optional string blockPoolId = 4;</code>
     * @return Whether the blockPoolId field is set.
     */
    boolean hasBlockPoolId();
    /**
     * <code>optional string blockPoolId = 4;</code>
     * @return The blockPoolId.
     */
    java.lang.String getBlockPoolId();
    /**
     * <code>optional string blockPoolId = 4;</code>
     * @return The bytes for blockPoolId.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getBlockPoolIdBytes();

    /**
     * <code>optional uint64 blockId = 5;</code>
     * @return Whether the blockId field is set.
     */
    boolean hasBlockId();
    /**
     * <code>optional uint64 blockId = 5;</code>
     * @return The blockId.
     */
    long getBlockId();

    /**
     * <code>repeated .hadoop.hdfs.AccessModeProto modes = 6;</code>
     * @return A list containing the modes.
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto> getModesList();
    /**
     * <code>repeated .hadoop.hdfs.AccessModeProto modes = 6;</code>
     * @return The count of modes.
     */
    int getModesCount();
    /**
     * <code>repeated .hadoop.hdfs.AccessModeProto modes = 6;</code>
     * @param index The index of the element to return.
     * @return The modes at the given index.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto getModes(int index);

    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
     * @return A list containing the storageTypes.
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList();
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
     * @return The count of storageTypes.
     */
    int getStorageTypesCount();
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
     * @param index The index of the element to return.
     * @return The storageTypes at the given index.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index);

    /**
     * <code>repeated string storageIds = 8;</code>
     * @return A list containing the storageIds.
     */
    java.util.List<java.lang.String>
        getStorageIdsList();
    /**
     * <code>repeated string storageIds = 8;</code>
     * @return The count of storageIds.
     */
    int getStorageIdsCount();
    /**
     * <code>repeated string storageIds = 8;</code>
     * @param index The index of the element to return.
     * @return The storageIds at the given index.
     */
    java.lang.String getStorageIds(int index);
    /**
     * <code>repeated string storageIds = 8;</code>
     * @param index The index of the value to return.
     * @return The bytes of the storageIds at the given index.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getStorageIdsBytes(int index);

    /**
     * <code>optional bytes handshakeSecret = 9;</code>
     * @return Whether the handshakeSecret field is set.
     */
    boolean hasHandshakeSecret();
    /**
     * <code>optional bytes handshakeSecret = 9;</code>
     * @return The handshakeSecret.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getHandshakeSecret();
  }
  /**
   * <pre>
   **
   * Secret information for the BlockKeyProto. This is not sent on the wire as
   * such; it is used to pack a byte array that is then encrypted and put in
   * BlockKeyProto.bytes.
   * When adding further fields, make sure they are optional as they would
   * otherwise not be backwards compatible.
   *
   * Note: As part of the migration from WritableUtils-based tokens (aka "legacy")
   * to Protocol Buffers, we use the first byte to determine the type. If the
   * first byte is &lt;=0 then it is a legacy token. This means that when using
   * protobuf tokens, the first field sent must have a `field_number` less
   * than 16 to make sure that the first byte is positive. Otherwise it could be
   * parsed as a legacy token. See HDFS-11026 for more discussion.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.BlockTokenSecretProto}
   */
  public static final class BlockTokenSecretProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.BlockTokenSecretProto)
      BlockTokenSecretProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use BlockTokenSecretProto.newBuilder() to construct.
    private BlockTokenSecretProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private BlockTokenSecretProto() {
      userId_ = "";
      blockPoolId_ = "";
      modes_ = java.util.Collections.emptyList();
      storageTypes_ = java.util.Collections.emptyList();
      storageIds_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
      handshakeSecret_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new BlockTokenSecretProto();
    }
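
    // A minimal illustrative check for the first-byte dispatch described in the
    // class comment above: with a field number below 16 and a wire type of 0-5,
    // the leading tag byte of a protobuf-encoded token is
    // (field_number << 3) | wire_type, i.e. between 0x08 and 0x7D and therefore
    // positive, whereas a legacy WritableUtils-encoded token starts with a byte
    // <= 0. The method name is purely illustrative; in Hadoop the actual
    // dispatch is performed by BlockTokenIdentifier, not by this generated class.
    private static boolean looksLikeProtobufToken(byte[] serialized) {
      // A positive leading byte is a protobuf tag for a field number below 16;
      // anything else is treated as the legacy encoding.
      return serialized != null && serialized.length > 0 && serialized[0] > 0;
    }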

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockTokenSecretProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockTokenSecretProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto.Builder.class);
    }

    private int bitField0_;
    public static final int EXPIRYDATE_FIELD_NUMBER = 1;
    private long expiryDate_ = 0L;
    /**
     * <code>optional uint64 expiryDate = 1;</code>
     * @return Whether the expiryDate field is set.
     */
    @java.lang.Override
    public boolean hasExpiryDate() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional uint64 expiryDate = 1;</code>
     * @return The expiryDate.
     */
    @java.lang.Override
    public long getExpiryDate() {
      return expiryDate_;
    }

    public static final int KEYID_FIELD_NUMBER = 2;
    private int keyId_ = 0;
    /**
     * <code>optional uint32 keyId = 2;</code>
     * @return Whether the keyId field is set.
     */
    @java.lang.Override
    public boolean hasKeyId() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional uint32 keyId = 2;</code>
     * @return The keyId.
     */
    @java.lang.Override
    public int getKeyId() {
      return keyId_;
    }

    public static final int USERID_FIELD_NUMBER = 3;
    @SuppressWarnings("serial")
    private volatile java.lang.Object userId_ = "";
    /**
     * <code>optional string userId = 3;</code>
     * @return Whether the userId field is set.
     */
    @java.lang.Override
    public boolean hasUserId() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional string userId = 3;</code>
     * @return The userId.
     */
    @java.lang.Override
    public java.lang.String getUserId() {
      java.lang.Object ref = userId_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          userId_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string userId = 3;</code>
     * @return The bytes for userId.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getUserIdBytes() {
      java.lang.Object ref = userId_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        userId_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int BLOCKPOOLID_FIELD_NUMBER = 4;
    @SuppressWarnings("serial")
    private volatile java.lang.Object blockPoolId_ = "";
    /**
     * <code>optional string blockPoolId = 4;</code>
     * @return Whether the blockPoolId field is set.
     */
    @java.lang.Override
    public boolean hasBlockPoolId() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>optional string blockPoolId = 4;</code>
     * @return The blockPoolId.
     */
    @java.lang.Override
    public java.lang.String getBlockPoolId() {
      java.lang.Object ref = blockPoolId_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          blockPoolId_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string blockPoolId = 4;</code>
     * @return The bytes for blockPoolId.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getBlockPoolIdBytes() {
      java.lang.Object ref = blockPoolId_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        blockPoolId_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int BLOCKID_FIELD_NUMBER = 5;
    private long blockId_ = 0L;
    /**
     * <code>optional uint64 blockId = 5;</code>
     * @return Whether the blockId field is set.
     */
    @java.lang.Override
    public boolean hasBlockId() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>optional uint64 blockId = 5;</code>
     * @return The blockId.
     */
    @java.lang.Override
    public long getBlockId() {
      return blockId_;
    }

    public static final int MODES_FIELD_NUMBER = 6;
    @SuppressWarnings("serial")
    private java.util.List<java.lang.Integer> modes_;
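    // The repeated enum is stored as raw ints; this converter adapts them to
    // AccessModeProto on access and falls back to READ for unrecognized numbers.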
    private static final org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
        java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto> modes_converter_ =
            new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
                java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto>() {
              public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto convert(java.lang.Integer from) {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto.forNumber(from);
                return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto.READ : result;
              }
            };
    /**
     * <code>repeated .hadoop.hdfs.AccessModeProto modes = 6;</code>
     * @return A list containing the modes.
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto> getModesList() {
      return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
          java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto>(modes_, modes_converter_);
    }
    /**
     * <code>repeated .hadoop.hdfs.AccessModeProto modes = 6;</code>
     * @return The count of modes.
     */
    @java.lang.Override
    public int getModesCount() {
      return modes_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.AccessModeProto modes = 6;</code>
     * @param index The index of the element to return.
     * @return The modes at the given index.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto getModes(int index) {
      return modes_converter_.convert(modes_.get(index));
    }

    public static final int STORAGETYPES_FIELD_NUMBER = 7;
    @SuppressWarnings("serial")
    private java.util.List<java.lang.Integer> storageTypes_;
    private static final org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
        java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> storageTypes_converter_ =
            new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
                java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>() {
              public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto convert(java.lang.Integer from) {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(from);
                return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
              }
            };
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
     * @return A list containing the storageTypes.
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList() {
      return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
          java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(storageTypes_, storageTypes_converter_);
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
     * @return The count of storageTypes.
     */
    @java.lang.Override
    public int getStorageTypesCount() {
      return storageTypes_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
     * @param index The index of the element to return.
     * @return The storageTypes at the given index.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) {
      return storageTypes_converter_.convert(storageTypes_.get(index));
    }

    public static final int STORAGEIDS_FIELD_NUMBER = 8;
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList storageIds_ =
        org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
    /**
     * <code>repeated string storageIds = 8;</code>
     * @return A list containing the storageIds.
     */
    public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
        getStorageIdsList() {
      return storageIds_;
    }
    /**
     * <code>repeated string storageIds = 8;</code>
     * @return The count of storageIds.
     */
    public int getStorageIdsCount() {
      return storageIds_.size();
    }
    /**
     * <code>repeated string storageIds = 8;</code>
     * @param index The index of the element to return.
     * @return The storageIds at the given index.
     */
    public java.lang.String getStorageIds(int index) {
      return storageIds_.get(index);
    }
    /**
     * <code>repeated string storageIds = 8;</code>
     * @param index The index of the value to return.
     * @return The bytes of the storageIds at the given index.
     */
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getStorageIdsBytes(int index) {
      return storageIds_.getByteString(index);
    }

    public static final int HANDSHAKESECRET_FIELD_NUMBER = 9;
    private org.apache.hadoop.thirdparty.protobuf.ByteString handshakeSecret_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>optional bytes handshakeSecret = 9;</code>
     * @return Whether the handshakeSecret field is set.
     */
    @java.lang.Override
    public boolean hasHandshakeSecret() {
      return ((bitField0_ & 0x00000020) != 0);
    }
    /**
     * <code>optional bytes handshakeSecret = 9;</code>
     * @return The handshakeSecret.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getHandshakeSecret() {
      return handshakeSecret_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt64(1, expiryDate_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt32(2, keyId_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, userId_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 4, blockPoolId_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeUInt64(5, blockId_);
      }
      for (int i = 0; i < modes_.size(); i++) {
        output.writeEnum(6, modes_.get(i));
      }
      for (int i = 0; i < storageTypes_.size(); i++) {
        output.writeEnum(7, storageTypes_.get(i));
      }
      for (int i = 0; i < storageIds_.size(); i++) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 8, storageIds_.getRaw(i));
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        output.writeBytes(9, handshakeSecret_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(1, expiryDate_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(2, keyId_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, userId_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(4, blockPoolId_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(5, blockId_);
      }
      {
        int dataSize = 0;
        for (int i = 0; i < modes_.size(); i++) {
          dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeEnumSizeNoTag(modes_.get(i));
        }
        size += dataSize;
        size += 1 * modes_.size();
      }
      {
        int dataSize = 0;
        for (int i = 0; i < storageTypes_.size(); i++) {
          dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeEnumSizeNoTag(storageTypes_.get(i));
        }
        size += dataSize;
        size += 1 * storageTypes_.size();
      }
      {
        int dataSize = 0;
        for (int i = 0; i < storageIds_.size(); i++) {
          dataSize += computeStringSizeNoTag(storageIds_.getRaw(i));
        }
        size += dataSize;
        size += 1 * getStorageIdsList().size();
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(9, handshakeSecret_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto) obj;

      if (hasExpiryDate() != other.hasExpiryDate()) return false;
      if (hasExpiryDate()) {
        if (getExpiryDate()
            != other.getExpiryDate()) return false;
      }
      if (hasKeyId() != other.hasKeyId()) return false;
      if (hasKeyId()) {
        if (getKeyId()
            != other.getKeyId()) return false;
      }
      if (hasUserId() != other.hasUserId()) return false;
      if (hasUserId()) {
        if (!getUserId()
            .equals(other.getUserId())) return false;
      }
      if (hasBlockPoolId() != other.hasBlockPoolId()) return false;
      if (hasBlockPoolId()) {
        if (!getBlockPoolId()
            .equals(other.getBlockPoolId())) return false;
      }
      if (hasBlockId() != other.hasBlockId()) return false;
      if (hasBlockId()) {
        if (getBlockId()
            != other.getBlockId()) return false;
      }
      if (!modes_.equals(other.modes_)) return false;
      if (!storageTypes_.equals(other.storageTypes_)) return false;
      if (!getStorageIdsList()
          .equals(other.getStorageIdsList())) return false;
      if (hasHandshakeSecret() != other.hasHandshakeSecret()) return false;
      if (hasHandshakeSecret()) {
        if (!getHandshakeSecret()
            .equals(other.getHandshakeSecret())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasExpiryDate()) {
        hash = (37 * hash) + EXPIRYDATE_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getExpiryDate());
      }
      if (hasKeyId()) {
        hash = (37 * hash) + KEYID_FIELD_NUMBER;
        hash = (53 * hash) + getKeyId();
      }
      if (hasUserId()) {
        hash = (37 * hash) + USERID_FIELD_NUMBER;
        hash = (53 * hash) + getUserId().hashCode();
      }
      if (hasBlockPoolId()) {
        hash = (37 * hash) + BLOCKPOOLID_FIELD_NUMBER;
        hash = (53 * hash) + getBlockPoolId().hashCode();
      }
      if (hasBlockId()) {
        hash = (37 * hash) + BLOCKID_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getBlockId());
      }
      if (getModesCount() > 0) {
        hash = (37 * hash) + MODES_FIELD_NUMBER;
        hash = (53 * hash) + modes_.hashCode();
      }
      if (getStorageTypesCount() > 0) {
        hash = (37 * hash) + STORAGETYPES_FIELD_NUMBER;
        hash = (53 * hash) + storageTypes_.hashCode();
      }
      if (getStorageIdsCount() > 0) {
        hash = (37 * hash) + STORAGEIDS_FIELD_NUMBER;
        hash = (53 * hash) + getStorageIdsList().hashCode();
      }
      if (hasHandshakeSecret()) {
        hash = (37 * hash) + HANDSHAKESECRET_FIELD_NUMBER;
        hash = (53 * hash) + getHandshakeSecret().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
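
    // A minimal usage sketch of the builder and parse methods in this class;
    // the field values below are placeholders chosen only for illustration.
    private static BlockTokenSecretProto buildAndReparseExample()
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      BlockTokenSecretProto secret = newBuilder()
          .setExpiryDate(System.currentTimeMillis())
          .setKeyId(1)
          .setUserId("hdfs")
          .setBlockPoolId("BP-1")
          .setBlockId(1073741825L)
          .addModes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto.READ)
          .build();
      // Serialize and parse back; toByteArray() is inherited from the message base class.
      return parseFrom(secret.toByteArray());
    }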
    /**
     * <pre>
     **
     * Secret information for the BlockKeyProto. This is not sent on the wire as
     * such; it is used to pack a byte array that is then encrypted and put in
     * BlockKeyProto.bytes.
     * When adding further fields, make sure they are optional as they would
     * otherwise not be backwards compatible.
     *
     * Note: As part of the migration from WritableUtils-based tokens (aka "legacy")
     * to Protocol Buffers, we use the first byte to determine the type. If the
     * first byte is &lt;=0 then it is a legacy token. This means that when using
     * protobuf tokens, the first field sent must have a `field_number` less
     * than 16 to make sure that the first byte is positive. Otherwise it could be
     * parsed as a legacy token. See HDFS-11026 for more discussion.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.BlockTokenSecretProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.BlockTokenSecretProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockTokenSecretProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockTokenSecretProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        expiryDate_ = 0L;
        keyId_ = 0;
        userId_ = "";
        blockPoolId_ = "";
        blockId_ = 0L;
        modes_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000020);
        storageTypes_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000040);
        storageIds_ =
            org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        handshakeSecret_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_BlockTokenSecretProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto result) {
        if (((bitField0_ & 0x00000020) != 0)) {
          modes_ = java.util.Collections.unmodifiableList(modes_);
          bitField0_ = (bitField0_ & ~0x00000020);
        }
        result.modes_ = modes_;
        if (((bitField0_ & 0x00000040) != 0)) {
          storageTypes_ = java.util.Collections.unmodifiableList(storageTypes_);
          bitField0_ = (bitField0_ & ~0x00000040);
        }
        result.storageTypes_ = storageTypes_;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.expiryDate_ = expiryDate_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.keyId_ = keyId_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.userId_ = userId_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.blockPoolId_ = blockPoolId_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.blockId_ = blockId_;
          to_bitField0_ |= 0x00000010;
        }
        if (((from_bitField0_ & 0x00000080) != 0)) {
          storageIds_.makeImmutable();
          result.storageIds_ = storageIds_;
        }
        if (((from_bitField0_ & 0x00000100) != 0)) {
          result.handshakeSecret_ = handshakeSecret_;
          to_bitField0_ |= 0x00000020;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto.getDefaultInstance()) return this;
        if (other.hasExpiryDate()) {
          setExpiryDate(other.getExpiryDate());
        }
        if (other.hasKeyId()) {
          setKeyId(other.getKeyId());
        }
        if (other.hasUserId()) {
          userId_ = other.userId_;
          bitField0_ |= 0x00000004;
          onChanged();
        }
        if (other.hasBlockPoolId()) {
          blockPoolId_ = other.blockPoolId_;
          bitField0_ |= 0x00000008;
          onChanged();
        }
        if (other.hasBlockId()) {
          setBlockId(other.getBlockId());
        }
        if (!other.modes_.isEmpty()) {
          if (modes_.isEmpty()) {
            modes_ = other.modes_;
            bitField0_ = (bitField0_ & ~0x00000020);
          } else {
            ensureModesIsMutable();
            modes_.addAll(other.modes_);
          }
          onChanged();
        }
        if (!other.storageTypes_.isEmpty()) {
          if (storageTypes_.isEmpty()) {
            storageTypes_ = other.storageTypes_;
            bitField0_ = (bitField0_ & ~0x00000040);
          } else {
            ensureStorageTypesIsMutable();
            storageTypes_.addAll(other.storageTypes_);
          }
          onChanged();
        }
        if (!other.storageIds_.isEmpty()) {
          if (storageIds_.isEmpty()) {
            storageIds_ = other.storageIds_;
            bitField0_ |= 0x00000080;
          } else {
            ensureStorageIdsIsMutable();
            storageIds_.addAll(other.storageIds_);
          }
          onChanged();
        }
        if (other.hasHandshakeSecret()) {
          setHandshakeSecret(other.getHandshakeSecret());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                expiryDate_ = input.readUInt64();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 16: {
                keyId_ = input.readUInt32();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 26: {
                userId_ = input.readBytes();
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              case 34: {
                blockPoolId_ = input.readBytes();
                bitField0_ |= 0x00000008;
                break;
              } // case 34
              case 40: {
                blockId_ = input.readUInt64();
                bitField0_ |= 0x00000010;
                break;
              } // case 40
              case 48: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(6, tmpRaw);
                } else {
                  ensureModesIsMutable();
                  modes_.add(tmpRaw);
                }
                break;
              } // case 48
              case 50: {
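                // Field 6 again, but length-delimited: the packed encoding of the
                // repeated enum; elements are read one at a time until the pushed limit.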
                int length = input.readRawVarint32();
                int oldLimit = input.pushLimit(length);
                while(input.getBytesUntilLimit() > 0) {
                  int tmpRaw = input.readEnum();
                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto tmpValue =
                      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto.forNumber(tmpRaw);
                  if (tmpValue == null) {
                    mergeUnknownVarintField(6, tmpRaw);
                  } else {
                    ensureModesIsMutable();
                    modes_.add(tmpRaw);
                  }
                }
                input.popLimit(oldLimit);
                break;
              } // case 50
              case 56: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(7, tmpRaw);
                } else {
                  ensureStorageTypesIsMutable();
                  storageTypes_.add(tmpRaw);
                }
                break;
              } // case 56
              case 58: {
                int length = input.readRawVarint32();
                int oldLimit = input.pushLimit(length);
                while(input.getBytesUntilLimit() > 0) {
                  int tmpRaw = input.readEnum();
                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue =
                      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw);
                  if (tmpValue == null) {
                    mergeUnknownVarintField(7, tmpRaw);
                  } else {
                    ensureStorageTypesIsMutable();
                    storageTypes_.add(tmpRaw);
                  }
                }
                input.popLimit(oldLimit);
                break;
              } // case 58
              case 66: {
                org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
                ensureStorageIdsIsMutable();
                storageIds_.add(bs);
                break;
              } // case 66
              case 74: {
                handshakeSecret_ = input.readBytes();
                bitField0_ |= 0x00000100;
                break;
              } // case 74
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private long expiryDate_ ;
      /**
       * <code>optional uint64 expiryDate = 1;</code>
       * @return Whether the expiryDate field is set.
       */
      @java.lang.Override
      public boolean hasExpiryDate() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional uint64 expiryDate = 1;</code>
       * @return The expiryDate.
       */
      @java.lang.Override
      public long getExpiryDate() {
        return expiryDate_;
      }
      /**
       * <code>optional uint64 expiryDate = 1;</code>
       * @param value The expiryDate to set.
       * @return This builder for chaining.
       */
      public Builder setExpiryDate(long value) {

        expiryDate_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 expiryDate = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearExpiryDate() {
        bitField0_ = (bitField0_ & ~0x00000001);
        expiryDate_ = 0L;
        onChanged();
        return this;
      }

      private int keyId_ ;
      /**
       * <code>optional uint32 keyId = 2;</code>
       * @return Whether the keyId field is set.
       */
      @java.lang.Override
      public boolean hasKeyId() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional uint32 keyId = 2;</code>
       * @return The keyId.
       */
      @java.lang.Override
      public int getKeyId() {
        return keyId_;
      }
      /**
       * <code>optional uint32 keyId = 2;</code>
       * @param value The keyId to set.
       * @return This builder for chaining.
       */
      public Builder setKeyId(int value) {

        keyId_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint32 keyId = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearKeyId() {
        bitField0_ = (bitField0_ & ~0x00000002);
        keyId_ = 0;
        onChanged();
        return this;
      }

      private java.lang.Object userId_ = "";
      /**
       * <code>optional string userId = 3;</code>
       * @return Whether the userId field is set.
       */
      public boolean hasUserId() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional string userId = 3;</code>
       * @return The userId.
       */
      public java.lang.String getUserId() {
        java.lang.Object ref = userId_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            userId_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string userId = 3;</code>
       * @return The bytes for userId.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getUserIdBytes() {
        java.lang.Object ref = userId_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          userId_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string userId = 3;</code>
       * @param value The userId to set.
       * @return This builder for chaining.
       */
      public Builder setUserId(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        userId_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional string userId = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearUserId() {
        userId_ = getDefaultInstance().getUserId();
        bitField0_ = (bitField0_ & ~0x00000004);
        onChanged();
        return this;
      }
      /**
       * <code>optional string userId = 3;</code>
       * @param value The bytes for userId to set.
       * @return This builder for chaining.
       */
      public Builder setUserIdBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        userId_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }

      private java.lang.Object blockPoolId_ = "";
      /**
       * <code>optional string blockPoolId = 4;</code>
       * @return Whether the blockPoolId field is set.
       */
      public boolean hasBlockPoolId() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional string blockPoolId = 4;</code>
       * @return The blockPoolId.
       */
      public java.lang.String getBlockPoolId() {
        java.lang.Object ref = blockPoolId_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            blockPoolId_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string blockPoolId = 4;</code>
       * @return The bytes for blockPoolId.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getBlockPoolIdBytes() {
        java.lang.Object ref = blockPoolId_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          blockPoolId_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string blockPoolId = 4;</code>
       * @param value The blockPoolId to set.
       * @return This builder for chaining.
       */
      public Builder setBlockPoolId(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        blockPoolId_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional string blockPoolId = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearBlockPoolId() {
        blockPoolId_ = getDefaultInstance().getBlockPoolId();
        bitField0_ = (bitField0_ & ~0x00000008);
        onChanged();
        return this;
      }
      /**
       * <code>optional string blockPoolId = 4;</code>
       * @param value The bytes for blockPoolId to set.
       * @return This builder for chaining.
       */
      public Builder setBlockPoolIdBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        blockPoolId_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }

      private long blockId_ ;
      /**
       * <code>optional uint64 blockId = 5;</code>
       * @return Whether the blockId field is set.
       */
      @java.lang.Override
      public boolean hasBlockId() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional uint64 blockId = 5;</code>
       * @return The blockId.
       */
      @java.lang.Override
      public long getBlockId() {
        return blockId_;
      }
      /**
       * <code>optional uint64 blockId = 5;</code>
       * @param value The blockId to set.
       * @return This builder for chaining.
       */
      public Builder setBlockId(long value) {

        blockId_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 blockId = 5;</code>
       * @return This builder for chaining.
       */
      public Builder clearBlockId() {
        bitField0_ = (bitField0_ & ~0x00000010);
        blockId_ = 0L;
        onChanged();
        return this;
      }

      private java.util.List<java.lang.Integer> modes_ =
        java.util.Collections.emptyList();
      private void ensureModesIsMutable() {
        if (!((bitField0_ & 0x00000020) != 0)) {
          modes_ = new java.util.ArrayList<java.lang.Integer>(modes_);
          bitField0_ |= 0x00000020;
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.AccessModeProto modes = 6;</code>
       * @return A list containing the modes.
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto> getModesList() {
        return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
            java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto>(modes_, modes_converter_);
      }
      /**
       * <code>repeated .hadoop.hdfs.AccessModeProto modes = 6;</code>
       * @return The count of modes.
       */
      public int getModesCount() {
        return modes_.size();
      }
      /**
       * <code>repeated .hadoop.hdfs.AccessModeProto modes = 6;</code>
       * @param index The index of the element to return.
       * @return The modes at the given index.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto getModes(int index) {
        return modes_converter_.convert(modes_.get(index));
      }
      /**
       * <code>repeated .hadoop.hdfs.AccessModeProto modes = 6;</code>
       * @param index The index to set the value at.
       * @param value The modes to set.
       * @return This builder for chaining.
       */
      public Builder setModes(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureModesIsMutable();
        modes_.set(index, value.getNumber());
        onChanged();
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.AccessModeProto modes = 6;</code>
       * @param value The modes to add.
       * @return This builder for chaining.
       */
      public Builder addModes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureModesIsMutable();
        modes_.add(value.getNumber());
        onChanged();
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.AccessModeProto modes = 6;</code>
       * @param values The modes to add.
       * @return This builder for chaining.
       */
      public Builder addAllModes(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto> values) {
        ensureModesIsMutable();
        for (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto value : values) {
          modes_.add(value.getNumber());
        }
        onChanged();
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.AccessModeProto modes = 6;</code>
       * @return This builder for chaining.
       */
      public Builder clearModes() {
        modes_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000020);
        onChanged();
        return this;
      }

      private java.util.List<java.lang.Integer> storageTypes_ =
        java.util.Collections.emptyList();
      private void ensureStorageTypesIsMutable() {
        if (!((bitField0_ & 0x00000040) != 0)) {
          storageTypes_ = new java.util.ArrayList<java.lang.Integer>(storageTypes_);
          bitField0_ |= 0x00000040;
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
       * @return A list containing the storageTypes.
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getStorageTypesList() {
        return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
            java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(storageTypes_, storageTypes_converter_);
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
       * @return The count of storageTypes.
       */
      public int getStorageTypesCount() {
        return storageTypes_.size();
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
       * @param index The index of the element to return.
       * @return The storageTypes at the given index.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageTypes(int index) {
        return storageTypes_converter_.convert(storageTypes_.get(index));
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
       * @param index The index to set the value at.
       * @param value The storageTypes to set.
       * @return This builder for chaining.
       */
      public Builder setStorageTypes(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureStorageTypesIsMutable();
        storageTypes_.set(index, value.getNumber());
        onChanged();
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
       * @param value The storageTypes to add.
       * @return This builder for chaining.
       */
      public Builder addStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureStorageTypesIsMutable();
        storageTypes_.add(value.getNumber());
        onChanged();
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
       * @param values The storageTypes to add.
       * @return This builder for chaining.
       */
      public Builder addAllStorageTypes(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> values) {
        ensureStorageTypesIsMutable();
        for (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value : values) {
          storageTypes_.add(value.getNumber());
        }
        onChanged();
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto storageTypes = 7;</code>
       * @return This builder for chaining.
       */
      public Builder clearStorageTypes() {
        storageTypes_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000040);
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList storageIds_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
      private void ensureStorageIdsIsMutable() {
        if (!storageIds_.isModifiable()) {
          storageIds_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(storageIds_);
        }
        bitField0_ |= 0x00000080;
      }
      /**
       * <code>repeated string storageIds = 8;</code>
       * @return A list containing the storageIds.
       */
      public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
          getStorageIdsList() {
        storageIds_.makeImmutable();
        return storageIds_;
      }
      /**
       * <code>repeated string storageIds = 8;</code>
       * @return The count of storageIds.
       */
      public int getStorageIdsCount() {
        return storageIds_.size();
      }
      /**
       * <code>repeated string storageIds = 8;</code>
       * @param index The index of the element to return.
       * @return The storageIds at the given index.
       */
      public java.lang.String getStorageIds(int index) {
        return storageIds_.get(index);
      }
      /**
       * <code>repeated string storageIds = 8;</code>
       * @param index The index of the value to return.
       * @return The bytes of the storageIds at the given index.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getStorageIdsBytes(int index) {
        return storageIds_.getByteString(index);
      }
      /**
       * <code>repeated string storageIds = 8;</code>
       * @param index The index to set the value at.
       * @param value The storageIds to set.
       * @return This builder for chaining.
       */
      public Builder setStorageIds(
          int index, java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureStorageIdsIsMutable();
        storageIds_.set(index, value);
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string storageIds = 8;</code>
       * @param value The storageIds to add.
       * @return This builder for chaining.
       */
      public Builder addStorageIds(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureStorageIdsIsMutable();
        storageIds_.add(value);
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string storageIds = 8;</code>
       * @param values The storageIds to add.
       * @return This builder for chaining.
       */
      public Builder addAllStorageIds(
          java.lang.Iterable<java.lang.String> values) {
        ensureStorageIdsIsMutable();
        org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
            values, storageIds_);
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string storageIds = 8;</code>
       * @return This builder for chaining.
       */
      public Builder clearStorageIds() {
        storageIds_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        bitField0_ = (bitField0_ & ~0x00000080);
        onChanged();
        return this;
      }
      /**
       * <code>repeated string storageIds = 8;</code>
       * @param value The bytes of the storageIds to add.
       * @return This builder for chaining.
       */
      public Builder addStorageIdsBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        ensureStorageIdsIsMutable();
        storageIds_.add(value);
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString handshakeSecret_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>optional bytes handshakeSecret = 9;</code>
       * @return Whether the handshakeSecret field is set.
       */
      @java.lang.Override
      public boolean hasHandshakeSecret() {
        return ((bitField0_ & 0x00000100) != 0);
      }
      /**
       * <code>optional bytes handshakeSecret = 9;</code>
       * @return The handshakeSecret.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getHandshakeSecret() {
        return handshakeSecret_;
      }
      /**
       * <code>optional bytes handshakeSecret = 9;</code>
       * @param value The handshakeSecret to set.
       * @return This builder for chaining.
       */
      public Builder setHandshakeSecret(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        handshakeSecret_ = value;
        bitField0_ |= 0x00000100;
        onChanged();
        return this;
      }
      /**
       * <code>optional bytes handshakeSecret = 9;</code>
       * @return This builder for chaining.
       */
      public Builder clearHandshakeSecret() {
        bitField0_ = (bitField0_ & ~0x00000100);
        handshakeSecret_ = getDefaultInstance().getHandshakeSecret();
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockTokenSecretProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockTokenSecretProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<BlockTokenSecretProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<BlockTokenSecretProto>() {
      @java.lang.Override
      public BlockTokenSecretProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<BlockTokenSecretProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<BlockTokenSecretProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
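
  // Illustrative sketch, not generated code: one plausible way a caller might use the
  // BlockTokenSecretProto builder methods above. The concrete values ("bp-1", the block
  // id, "storage-1") and the READ access mode are hypothetical examples, not values
  // taken from HDFS itself.
  //
  //   HdfsProtos.BlockTokenSecretProto secret =
  //       HdfsProtos.BlockTokenSecretProto.newBuilder()
  //           .setBlockPoolId("bp-1")                          // hypothetical pool id
  //           .setBlockId(1073741825L)                         // hypothetical block id
  //           .addModes(HdfsProtos.AccessModeProto.READ)       // stored as its wire number
  //           .addStorageTypes(HdfsProtos.StorageTypeProto.DISK)
  //           .addStorageIds("storage-1")                      // hypothetical storage id
  //           .build();
  //   byte[] wire = secret.toByteArray();                      // standard protobuf encoding
  //   HdfsProtos.BlockTokenSecretProto decoded =
  //       HdfsProtos.BlockTokenSecretProto.parseFrom(wire);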

  public interface RouterFederatedStateProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.RouterFederatedStateProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * Last seen state IDs for multiple namespaces.
     * </pre>
     *
     * <code>map&lt;string, int64&gt; namespaceStateIds = 1;</code>
     */
    int getNamespaceStateIdsCount();
    /**
     * <pre>
     * Last seen state IDs for multiple namespaces.
     * </pre>
     *
     * <code>map&lt;string, int64&gt; namespaceStateIds = 1;</code>
     */
    boolean containsNamespaceStateIds(
        java.lang.String key);
    /**
     * Use {@link #getNamespaceStateIdsMap()} instead.
     */
    @java.lang.Deprecated
    java.util.Map<java.lang.String, java.lang.Long>
    getNamespaceStateIds();
    /**
     * <pre>
     * Last seen state IDs for multiple namespaces.
     * </pre>
     *
     * <code>map&lt;string, int64&gt; namespaceStateIds = 1;</code>
     */
    java.util.Map<java.lang.String, java.lang.Long>
    getNamespaceStateIdsMap();
    /**
     * <pre>
     * Last seen state IDs for multiple namespaces.
     * </pre>
     *
     * <code>map&lt;string, int64&gt; namespaceStateIds = 1;</code>
     */
    long getNamespaceStateIdsOrDefault(
        java.lang.String key,
        long defaultValue);
    /**
     * <pre>
     * Last seen state IDs for multiple namespaces.
     * </pre>
     *
     * <code>map&lt;string, int64&gt; namespaceStateIds = 1;</code>
     */
    long getNamespaceStateIdsOrThrow(
        java.lang.String key);
  }
  /**
   * <pre>
   **
   * Clients should receive this message in RPC responses and forward it
   * in RPC requests without interpreting it. It should be encoded
   * as an obscure byte array when being sent to clients.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.RouterFederatedStateProto}
   */
  public static final class RouterFederatedStateProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.RouterFederatedStateProto)
      RouterFederatedStateProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use RouterFederatedStateProto.newBuilder() to construct.
    private RouterFederatedStateProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private RouterFederatedStateProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new RouterFederatedStateProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RouterFederatedStateProto_descriptor;
    }

    @SuppressWarnings({"rawtypes"})
    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection(
        int number) {
      switch (number) {
        case 1:
          return internalGetNamespaceStateIds();
        default:
          throw new RuntimeException(
              "Invalid map field number: " + number);
      }
    }
    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RouterFederatedStateProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto.Builder.class);
    }

    public static final int NAMESPACESTATEIDS_FIELD_NUMBER = 1;
    private static final class NamespaceStateIdsDefaultEntryHolder {
      static final org.apache.hadoop.thirdparty.protobuf.MapEntry<
          java.lang.String, java.lang.Long> defaultEntry =
              org.apache.hadoop.thirdparty.protobuf.MapEntry
              .<java.lang.String, java.lang.Long>newDefaultInstance(
                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RouterFederatedStateProto_NamespaceStateIdsEntry_descriptor, 
                  org.apache.hadoop.thirdparty.protobuf.WireFormat.FieldType.STRING,
                  "",
                  org.apache.hadoop.thirdparty.protobuf.WireFormat.FieldType.INT64,
                  0L);
    }
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.MapField<
        java.lang.String, java.lang.Long> namespaceStateIds_;
    private org.apache.hadoop.thirdparty.protobuf.MapField<java.lang.String, java.lang.Long>
    internalGetNamespaceStateIds() {
      if (namespaceStateIds_ == null) {
        return org.apache.hadoop.thirdparty.protobuf.MapField.emptyMapField(
            NamespaceStateIdsDefaultEntryHolder.defaultEntry);
      }
      return namespaceStateIds_;
    }
    public int getNamespaceStateIdsCount() {
      return internalGetNamespaceStateIds().getMap().size();
    }
    /**
     * <pre>
     * Last seen state IDs for multiple namespaces.
     * </pre>
     *
     * <code>map&lt;string, int64&gt; namespaceStateIds = 1;</code>
     */
    @java.lang.Override
    public boolean containsNamespaceStateIds(
        java.lang.String key) {
      if (key == null) { throw new NullPointerException("map key"); }
      return internalGetNamespaceStateIds().getMap().containsKey(key);
    }
    /**
     * Use {@link #getNamespaceStateIdsMap()} instead.
     */
    @java.lang.Override
    @java.lang.Deprecated
    public java.util.Map<java.lang.String, java.lang.Long> getNamespaceStateIds() {
      return getNamespaceStateIdsMap();
    }
    /**
     * <pre>
     * Last seen state IDs for multiple namespaces.
     * </pre>
     *
     * <code>map&lt;string, int64&gt; namespaceStateIds = 1;</code>
     */
    @java.lang.Override
    public java.util.Map<java.lang.String, java.lang.Long> getNamespaceStateIdsMap() {
      return internalGetNamespaceStateIds().getMap();
    }
    /**
     * <pre>
     * Last seen state IDs for multiple namespaces.
     * </pre>
     *
     * <code>map&lt;string, int64&gt; namespaceStateIds = 1;</code>
     */
    @java.lang.Override
    public long getNamespaceStateIdsOrDefault(
        java.lang.String key,
        long defaultValue) {
      if (key == null) { throw new NullPointerException("map key"); }
      java.util.Map<java.lang.String, java.lang.Long> map =
          internalGetNamespaceStateIds().getMap();
      return map.containsKey(key) ? map.get(key) : defaultValue;
    }
    /**
     * <pre>
     * Last seen state IDs for multiple namespaces.
     * </pre>
     *
     * <code>map&lt;string, int64&gt; namespaceStateIds = 1;</code>
     */
    @java.lang.Override
    public long getNamespaceStateIdsOrThrow(
        java.lang.String key) {
      if (key == null) { throw new NullPointerException("map key"); }
      java.util.Map<java.lang.String, java.lang.Long> map =
          internalGetNamespaceStateIds().getMap();
      if (!map.containsKey(key)) {
        throw new java.lang.IllegalArgumentException();
      }
      return map.get(key);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
        .serializeStringMapTo(
          output,
          internalGetNamespaceStateIds(),
          NamespaceStateIdsDefaultEntryHolder.defaultEntry,
          1);
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      for (java.util.Map.Entry<java.lang.String, java.lang.Long> entry
           : internalGetNamespaceStateIds().getMap().entrySet()) {
        org.apache.hadoop.thirdparty.protobuf.MapEntry<java.lang.String, java.lang.Long>
        namespaceStateIds__ = NamespaceStateIdsDefaultEntryHolder.defaultEntry.newBuilderForType()
            .setKey(entry.getKey())
            .setValue(entry.getValue())
            .build();
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeMessageSize(1, namespaceStateIds__);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto other = (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto) obj;

      if (!internalGetNamespaceStateIds().equals(
          other.internalGetNamespaceStateIds())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (!internalGetNamespaceStateIds().getMap().isEmpty()) {
        hash = (37 * hash) + NAMESPACESTATEIDS_FIELD_NUMBER;
        hash = (53 * hash) + internalGetNamespaceStateIds().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Clients should receive this message in RPC responses and forward it
     * in RPC requests without interpreting it. It should be encoded
     * as an obscure byte array when being sent to clients.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.RouterFederatedStateProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.RouterFederatedStateProto)
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RouterFederatedStateProto_descriptor;
      }

      @SuppressWarnings({"rawtypes"})
      protected org.apache.hadoop.thirdparty.protobuf.MapFieldReflectionAccessor internalGetMapFieldReflection(
          int number) {
        switch (number) {
          case 1:
            return internalGetNamespaceStateIds();
          default:
            throw new RuntimeException(
                "Invalid map field number: " + number);
        }
      }
      @SuppressWarnings({"rawtypes"})
      protected org.apache.hadoop.thirdparty.protobuf.MapFieldReflectionAccessor internalGetMutableMapFieldReflection(
          int number) {
        switch (number) {
          case 1:
            return internalGetMutableNamespaceStateIds();
          default:
            throw new RuntimeException(
                "Invalid map field number: " + number);
        }
      }
      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RouterFederatedStateProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto.class, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        internalGetMutableNamespaceStateIds().clear();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.internal_static_hadoop_hdfs_RouterFederatedStateProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto build() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto result = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto result) {
        int from_bitField0_ = bitField0_;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.namespaceStateIds_ = internalGetNamespaceStateIds();
          result.namespaceStateIds_.makeImmutable();
        }
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto.getDefaultInstance()) return this;
        internalGetMutableNamespaceStateIds().mergeFrom(
            other.internalGetNamespaceStateIds());
        bitField0_ |= 0x00000001;
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                org.apache.hadoop.thirdparty.protobuf.MapEntry<java.lang.String, java.lang.Long>
                namespaceStateIds__ = input.readMessage(
                    NamespaceStateIdsDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry);
                internalGetMutableNamespaceStateIds().getMutableMap().put(
                    namespaceStateIds__.getKey(), namespaceStateIds__.getValue());
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

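      // The map field is created lazily: the read-only accessor below returns a shared
      // empty MapField until a mutation forces allocation (or a mutable copy) in
      // internalGetMutableNamespaceStateIds().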
      private org.apache.hadoop.thirdparty.protobuf.MapField<
          java.lang.String, java.lang.Long> namespaceStateIds_;
      private org.apache.hadoop.thirdparty.protobuf.MapField<java.lang.String, java.lang.Long>
          internalGetNamespaceStateIds() {
        if (namespaceStateIds_ == null) {
          return org.apache.hadoop.thirdparty.protobuf.MapField.emptyMapField(
              NamespaceStateIdsDefaultEntryHolder.defaultEntry);
        }
        return namespaceStateIds_;
      }
      private org.apache.hadoop.thirdparty.protobuf.MapField<java.lang.String, java.lang.Long>
          internalGetMutableNamespaceStateIds() {
        if (namespaceStateIds_ == null) {
          namespaceStateIds_ = org.apache.hadoop.thirdparty.protobuf.MapField.newMapField(
              NamespaceStateIdsDefaultEntryHolder.defaultEntry);
        }
        if (!namespaceStateIds_.isMutable()) {
          namespaceStateIds_ = namespaceStateIds_.copy();
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return namespaceStateIds_;
      }
      public int getNamespaceStateIdsCount() {
        return internalGetNamespaceStateIds().getMap().size();
      }
      /**
       * <pre>
       * Last seen state IDs for multiple namespaces.
       * </pre>
       *
       * <code>map&lt;string, int64&gt; namespaceStateIds = 1;</code>
       */
      @java.lang.Override
      public boolean containsNamespaceStateIds(
          java.lang.String key) {
        if (key == null) { throw new NullPointerException("map key"); }
        return internalGetNamespaceStateIds().getMap().containsKey(key);
      }
      /**
       * Use {@link #getNamespaceStateIdsMap()} instead.
       */
      @java.lang.Override
      @java.lang.Deprecated
      public java.util.Map<java.lang.String, java.lang.Long> getNamespaceStateIds() {
        return getNamespaceStateIdsMap();
      }
      /**
       * <pre>
       * Last seen state IDs for multiple namespaces.
       * </pre>
       *
       * <code>map&lt;string, int64&gt; namespaceStateIds = 1;</code>
       */
      @java.lang.Override
      public java.util.Map<java.lang.String, java.lang.Long> getNamespaceStateIdsMap() {
        return internalGetNamespaceStateIds().getMap();
      }
      /**
       * <pre>
       * Last seen state IDs for multiple namespaces.
       * </pre>
       *
       * <code>map&lt;string, int64&gt; namespaceStateIds = 1;</code>
       */
      @java.lang.Override
      public long getNamespaceStateIdsOrDefault(
          java.lang.String key,
          long defaultValue) {
        if (key == null) { throw new NullPointerException("map key"); }
        java.util.Map<java.lang.String, java.lang.Long> map =
            internalGetNamespaceStateIds().getMap();
        return map.containsKey(key) ? map.get(key) : defaultValue;
      }
      /**
       * <pre>
       * Last seen state IDs for multiple namespaces.
       * </pre>
       *
       * <code>map&lt;string, int64&gt; namespaceStateIds = 1;</code>
       */
      @java.lang.Override
      public long getNamespaceStateIdsOrThrow(
          java.lang.String key) {
        if (key == null) { throw new NullPointerException("map key"); }
        java.util.Map<java.lang.String, java.lang.Long> map =
            internalGetNamespaceStateIds().getMap();
        if (!map.containsKey(key)) {
          throw new java.lang.IllegalArgumentException();
        }
        return map.get(key);
      }
      public Builder clearNamespaceStateIds() {
        bitField0_ = (bitField0_ & ~0x00000001);
        internalGetMutableNamespaceStateIds().getMutableMap()
            .clear();
        return this;
      }
      /**
       * <pre>
       * Last seen state IDs for multiple namespaces.
       * </pre>
       *
       * <code>map&lt;string, int64&gt; namespaceStateIds = 1;</code>
       */
      public Builder removeNamespaceStateIds(
          java.lang.String key) {
        if (key == null) { throw new NullPointerException("map key"); }
        internalGetMutableNamespaceStateIds().getMutableMap()
            .remove(key);
        return this;
      }
      /**
       * Use alternate mutation accessors instead.
       */
      @java.lang.Deprecated
      public java.util.Map<java.lang.String, java.lang.Long>
          getMutableNamespaceStateIds() {
        bitField0_ |= 0x00000001;
        return internalGetMutableNamespaceStateIds().getMutableMap();
      }
      /**
       * <pre>
       * Last seen state IDs for multiple namespaces.
       * </pre>
       *
       * <code>map&lt;string, int64&gt; namespaceStateIds = 1;</code>
       */
      public Builder putNamespaceStateIds(
          java.lang.String key,
          long value) {
        if (key == null) { throw new NullPointerException("map key"); }

        internalGetMutableNamespaceStateIds().getMutableMap()
            .put(key, value);
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <pre>
       * Last seen state IDs for multiple namespaces.
       * </pre>
       *
       * <code>map&lt;string, int64&gt; namespaceStateIds = 1;</code>
       */
      public Builder putAllNamespaceStateIds(
          java.util.Map<java.lang.String, java.lang.Long> values) {
        internalGetMutableNamespaceStateIds().getMutableMap()
            .putAll(values);
        bitField0_ |= 0x00000001;
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RouterFederatedStateProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.RouterFederatedStateProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<RouterFederatedStateProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<RouterFederatedStateProto>() {
      @java.lang.Override
      public RouterFederatedStateProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<RouterFederatedStateProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<RouterFederatedStateProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RouterFederatedStateProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
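
  // Illustrative sketch, not generated code: per the RouterFederatedStateProto Javadoc,
  // clients treat this message as opaque bytes and echo it back unparsed. A router-side
  // caller might build and decode it roughly as follows; the namespace name "ns0" and
  // the state id 42L are hypothetical example values.
  //
  //   HdfsProtos.RouterFederatedStateProto state =
  //       HdfsProtos.RouterFederatedStateProto.newBuilder()
  //           .putNamespaceStateIds("ns0", 42L)    // last seen state id for a namespace
  //           .build();
  //   org.apache.hadoop.thirdparty.protobuf.ByteString opaque = state.toByteString();
  //   // Clients forward `opaque` untouched; the receiving side later decodes it:
  //   HdfsProtos.RouterFederatedStateProto echoed =
  //       HdfsProtos.RouterFederatedStateProto.parseFrom(opaque);
  //   long ns0StateId = echoed.getNamespaceStateIdsOrDefault("ns0", -1L);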

  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_ExtendedBlockProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ProvidedStorageLocationProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_ProvidedStorageLocationProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_DatanodeIDProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_DatanodeIDProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_DatanodeLocalInfoProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_DatanodeInfosProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_DatanodeInfoProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_DatanodeStorageProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_StorageReportProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_StorageReportProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ContentSummaryProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_ContentSummaryProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_QuotaUsageProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_QuotaUsageProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_CorruptFileBlocksProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_StorageTypesProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_StorageTypesProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_BlockStoragePolicyProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_LocatedBlockProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_LocatedBlockProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_BatchedListingKeyProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_BatchedListingKeyProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_DataEncryptionKeyProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_FileEncryptionInfoProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ReencryptionInfoProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_ReencryptionInfoProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_CipherOptionProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_CipherOptionProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_LocatedBlocksProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ECSchemaProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_ECSchemaProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ErasureCodingPolicyProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_ErasureCodingPolicyProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_HdfsPathHandleProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_HdfsPathHandleProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_HdfsFileStatusProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_BlockChecksumOptionsProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_BlockChecksumOptionsProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_FsServerDefaultsProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_DirectoryListingProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_DirectoryListingProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_RemoteExceptionProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_RemoteExceptionProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_BatchedDirectoryListingProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_BatchedDirectoryListingProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_SnapshotStatusProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_SnapshotStatusProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_SnapshotListingProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_SnapshotListingProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_SnapshotDiffReportProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_BlockProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_BlockProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_SnapshotInfoProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_RollingUpgradeStatusProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_StorageUuidsProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_StorageUuidsProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_BlockTokenSecretProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_BlockTokenSecretProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_RouterFederatedStateProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_RouterFederatedStateProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_RouterFederatedStateProto_NamespaceStateIdsEntry_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_RouterFederatedStateProto_NamespaceStateIdsEntry_fieldAccessorTable;

  public static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
      getDescriptor() {
    return descriptor;
  }
  private static  org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
      descriptor;
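  // Explanatory note (added, not emitted by protoc): the static initializer
  // below decodes the serialized hdfs.proto file descriptor embedded in
  // descriptorData, resolves its imports, and populates the per-message
  // Descriptor and FieldAccessorTable fields declared above.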
  static {
    java.lang.String[] descriptorData = {
      "\n\nhdfs.proto\022\013hadoop.hdfs\032\016Security.prot" +
      "o\032\tacl.proto\"c\n\022ExtendedBlockProto\022\016\n\006po" +
      "olId\030\001 \002(\t\022\017\n\007blockId\030\002 \002(\004\022\027\n\017generatio" +
      "nStamp\030\003 \002(\004\022\023\n\010numBytes\030\004 \001(\004:\0010\"[\n\034Pro" +
      "videdStorageLocationProto\022\014\n\004path\030\001 \002(\t\022" +
      "\016\n\006offset\030\002 \002(\003\022\016\n\006length\030\003 \002(\003\022\r\n\005nonce" +
      "\030\004 \002(\014\"\231\001\n\017DatanodeIDProto\022\016\n\006ipAddr\030\001 \002" +
      "(\t\022\020\n\010hostName\030\002 \002(\t\022\024\n\014datanodeUuid\030\003 \002" +
      "(\t\022\020\n\010xferPort\030\004 \002(\r\022\020\n\010infoPort\030\005 \002(\r\022\017" +
      "\n\007ipcPort\030\006 \002(\r\022\031\n\016infoSecurePort\030\007 \001(\r:" +
      "\0010\"X\n\026DatanodeLocalInfoProto\022\027\n\017software" +
      "Version\030\001 \002(\t\022\025\n\rconfigVersion\030\002 \002(\t\022\016\n\006" +
      "uptime\030\003 \002(\004\"\315\001\n\027DatanodeVolumeInfoProto" +
      "\022\014\n\004path\030\001 \002(\t\0222\n\013storageType\030\002 \002(\0162\035.ha" +
      "doop.hdfs.StorageTypeProto\022\021\n\tusedSpace\030" +
      "\003 \002(\004\022\021\n\tfreeSpace\030\004 \002(\004\022\025\n\rreservedSpac" +
      "e\030\005 \002(\004\022 \n\030reservedSpaceForReplicas\030\006 \002(" +
      "\004\022\021\n\tnumBlocks\030\007 \002(\004\"G\n\022DatanodeInfosPro" +
      "to\0221\n\tdatanodes\030\001 \003(\0132\036.hadoop.hdfs.Data" +
      "nodeInfoProto\"\207\005\n\021DatanodeInfoProto\022(\n\002i" +
      "d\030\001 \002(\0132\034.hadoop.hdfs.DatanodeIDProto\022\023\n" +
      "\010capacity\030\002 \001(\004:\0010\022\022\n\007dfsUsed\030\003 \001(\004:\0010\022\024" +
      "\n\tremaining\030\004 \001(\004:\0010\022\030\n\rblockPoolUsed\030\005 " +
      "\001(\004:\0010\022\025\n\nlastUpdate\030\006 \001(\004:\0010\022\027\n\014xceiver" +
      "Count\030\007 \001(\r:\0010\022\020\n\010location\030\010 \001(\t\022\022\n\nnonD" +
      "fsUsed\030\t \001(\004\022E\n\nadminState\030\n \001(\0162).hadoo" +
      "p.hdfs.DatanodeInfoProto.AdminState:\006NOR" +
      "MAL\022\030\n\rcacheCapacity\030\013 \001(\004:\0010\022\024\n\tcacheUs" +
      "ed\030\014 \001(\004:\0010\022\036\n\023lastUpdateMonotonic\030\r \001(\004" +
      ":\0010\022\025\n\rupgradeDomain\030\016 \001(\t\022\036\n\023lastBlockR" +
      "eportTime\030\017 \001(\004:\0010\022#\n\030lastBlockReportMon" +
      "otonic\030\020 \001(\004:\0010\022\024\n\tnumBlocks\030\021 \001(\r:\0010\022\027\n" +
      "\017softwareVersion\030\022 \001(\t\"w\n\nAdminState\022\n\n\006" +
      "NORMAL\020\000\022\033\n\027DECOMMISSION_INPROGRESS\020\001\022\022\n" +
      "\016DECOMMISSIONED\020\002\022\030\n\024ENTERING_MAINTENANC" +
      "E\020\003\022\022\n\016IN_MAINTENANCE\020\004\"\336\001\n\024DatanodeStor" +
      "ageProto\022\023\n\013storageUuid\030\001 \002(\t\022E\n\005state\030\002" +
      " \001(\0162..hadoop.hdfs.DatanodeStorageProto." +
      "StorageState:\006NORMAL\0228\n\013storageType\030\003 \001(" +
      "\0162\035.hadoop.hdfs.StorageTypeProto:\004DISK\"0" +
      "\n\014StorageState\022\n\n\006NORMAL\020\000\022\024\n\020READ_ONLY_" +
      "SHARED\020\001\"\364\001\n\022StorageReportProto\022\027\n\013stora" +
      "geUuid\030\001 \002(\tB\002\030\001\022\025\n\006failed\030\002 \001(\010:\005false\022" +
      "\023\n\010capacity\030\003 \001(\004:\0010\022\022\n\007dfsUsed\030\004 \001(\004:\0010" +
      "\022\024\n\tremaining\030\005 \001(\004:\0010\022\030\n\rblockPoolUsed\030" +
      "\006 \001(\004:\0010\0222\n\007storage\030\007 \001(\0132!.hadoop.hdfs." +
      "DatanodeStorageProto\022\022\n\nnonDfsUsed\030\010 \001(\004" +
      "\022\r\n\005mount\030\t \001(\t\"\332\002\n\023ContentSummaryProto\022" +
      "\016\n\006length\030\001 \002(\004\022\021\n\tfileCount\030\002 \002(\004\022\026\n\016di" +
      "rectoryCount\030\003 \002(\004\022\r\n\005quota\030\004 \002(\004\022\025\n\rspa" +
      "ceConsumed\030\005 \002(\004\022\022\n\nspaceQuota\030\006 \002(\004\022?\n\016" +
      "typeQuotaInfos\030\007 \001(\0132\'.hadoop.hdfs.Stora" +
      "geTypeQuotaInfosProto\022\026\n\016snapshotLength\030" +
      "\010 \001(\004\022\031\n\021snapshotFileCount\030\t \001(\004\022\036\n\026snap" +
      "shotDirectoryCount\030\n \001(\004\022\035\n\025snapshotSpac" +
      "eConsumed\030\013 \001(\004\022\033\n\023erasureCodingPolicy\030\014" +
      " \001(\t\"\253\001\n\017QuotaUsageProto\022\035\n\025fileAndDirec" +
      "toryCount\030\001 \002(\004\022\r\n\005quota\030\002 \002(\004\022\025\n\rspaceC" +
      "onsumed\030\003 \002(\004\022\022\n\nspaceQuota\030\004 \002(\004\022?\n\016typ" +
      "eQuotaInfos\030\005 \001(\0132\'.hadoop.hdfs.StorageT" +
      "ypeQuotaInfosProto\"[\n\032StorageTypeQuotaIn" +
      "fosProto\022=\n\rtypeQuotaInfo\030\001 \003(\0132&.hadoop" +
      ".hdfs.StorageTypeQuotaInfoProto\"o\n\031Stora" +
      "geTypeQuotaInfoProto\0221\n\004type\030\001 \001(\0162\035.had" +
      "oop.hdfs.StorageTypeProto:\004DISK\022\r\n\005quota" +
      "\030\002 \002(\004\022\020\n\010consumed\030\003 \002(\004\"7\n\026CorruptFileB" +
      "locksProto\022\r\n\005files\030\001 \003(\t\022\016\n\006cookie\030\002 \002(" +
      "\t\"H\n\021StorageTypesProto\0223\n\014storageTypes\030\001" +
      " \003(\0162\035.hadoop.hdfs.StorageTypeProto\"\364\001\n\027" +
      "BlockStoragePolicyProto\022\020\n\010policyId\030\001 \002(" +
      "\r\022\014\n\004name\030\002 \002(\t\0226\n\016creationPolicy\030\003 \002(\0132" +
      "\036.hadoop.hdfs.StorageTypesProto\022>\n\026creat" +
      "ionFallbackPolicy\030\004 \001(\0132\036.hadoop.hdfs.St" +
      "orageTypesProto\022A\n\031replicationFallbackPo" +
      "licy\030\005 \001(\0132\036.hadoop.hdfs.StorageTypesPro" +
      "to\"\342\002\n\021LocatedBlockProto\022*\n\001b\030\001 \002(\0132\037.ha" +
      "doop.hdfs.ExtendedBlockProto\022\016\n\006offset\030\002" +
      " \002(\004\022,\n\004locs\030\003 \003(\0132\036.hadoop.hdfs.Datanod" +
      "eInfoProto\022\017\n\007corrupt\030\004 \002(\010\022-\n\nblockToke" +
      "n\030\005 \002(\0132\031.hadoop.common.TokenProto\022\024\n\010is" +
      "Cached\030\006 \003(\010B\002\020\001\0223\n\014storageTypes\030\007 \003(\0162\035" +
      ".hadoop.hdfs.StorageTypeProto\022\022\n\nstorage" +
      "IDs\030\010 \003(\t\022\024\n\014blockIndices\030\t \001(\014\022.\n\013block" +
      "Tokens\030\n \003(\0132\031.hadoop.common.TokenProto\"" +
      "Q\n\026BatchedListingKeyProto\022\020\n\010checksum\030\001 " +
      "\002(\014\022\021\n\tpathIndex\030\002 \002(\r\022\022\n\nstartAfter\030\003 \002" +
      "(\014\"\223\001\n\026DataEncryptionKeyProto\022\r\n\005keyId\030\001" +
      " \002(\r\022\023\n\013blockPoolId\030\002 \002(\t\022\r\n\005nonce\030\003 \002(\014" +
      "\022\025\n\rencryptionKey\030\004 \002(\014\022\022\n\nexpiryDate\030\005 " +
      "\002(\004\022\033\n\023encryptionAlgorithm\030\006 \001(\t\"\323\001\n\027Fil" +
      "eEncryptionInfoProto\022,\n\005suite\030\001 \002(\0162\035.ha" +
      "doop.hdfs.CipherSuiteProto\022F\n\025cryptoProt" +
      "ocolVersion\030\002 \002(\0162\'.hadoop.hdfs.CryptoPr" +
      "otocolVersionProto\022\013\n\003key\030\003 \002(\014\022\n\n\002iv\030\004 " +
      "\002(\014\022\017\n\007keyName\030\005 \002(\t\022\030\n\020ezKeyVersionName" +
      "\030\006 \002(\t\"O\n\032PerFileEncryptionInfoProto\022\013\n\003" +
      "key\030\001 \002(\014\022\n\n\002iv\030\002 \002(\014\022\030\n\020ezKeyVersionNam" +
      "e\030\003 \002(\t\"\337\001\n\027ZoneEncryptionInfoProto\022,\n\005s" +
      "uite\030\001 \002(\0162\035.hadoop.hdfs.CipherSuiteProt" +
      "o\022F\n\025cryptoProtocolVersion\030\002 \002(\0162\'.hadoo" +
      "p.hdfs.CryptoProtocolVersionProto\022\017\n\007key" +
      "Name\030\003 \002(\t\022=\n\021reencryptionProto\030\004 \001(\0132\"." +
      "hadoop.hdfs.ReencryptionInfoProto\"\262\001\n\025Re" +
      "encryptionInfoProto\022\030\n\020ezKeyVersionName\030" +
      "\001 \002(\t\022\026\n\016submissionTime\030\002 \002(\004\022\020\n\010cancele" +
      "d\030\003 \002(\010\022\026\n\016numReencrypted\030\004 \002(\003\022\023\n\013numFa" +
      "ilures\030\005 \002(\003\022\026\n\016completionTime\030\006 \001(\004\022\020\n\010" +
      "lastFile\030\007 \001(\t\"}\n\021CipherOptionProto\022,\n\005s" +
      "uite\030\001 \002(\0162\035.hadoop.hdfs.CipherSuiteProt" +
      "o\022\r\n\005inKey\030\002 \001(\014\022\014\n\004inIv\030\003 \001(\014\022\016\n\006outKey" +
      "\030\004 \001(\014\022\r\n\005outIv\030\005 \001(\014\"\276\002\n\022LocatedBlocksP" +
      "roto\022\022\n\nfileLength\030\001 \002(\004\022.\n\006blocks\030\002 \003(\013" +
      "2\036.hadoop.hdfs.LocatedBlockProto\022\031\n\021unde" +
      "rConstruction\030\003 \002(\010\0221\n\tlastBlock\030\004 \001(\0132\036" +
      ".hadoop.hdfs.LocatedBlockProto\022\033\n\023isLast" +
      "BlockComplete\030\005 \002(\010\022@\n\022fileEncryptionInf" +
      "o\030\006 \001(\0132$.hadoop.hdfs.FileEncryptionInfo" +
      "Proto\0227\n\010ecPolicy\030\007 \001(\0132%.hadoop.hdfs.Er" +
      "asureCodingPolicyProto\"6\n\030ECSchemaOption" +
      "EntryProto\022\013\n\003key\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\"\202" +
      "\001\n\rECSchemaProto\022\021\n\tcodecName\030\001 \002(\t\022\021\n\td" +
      "ataUnits\030\002 \002(\r\022\023\n\013parityUnits\030\003 \002(\r\0226\n\007o" +
      "ptions\030\004 \003(\0132%.hadoop.hdfs.ECSchemaOptio" +
      "nEntryProto\"\261\001\n\030ErasureCodingPolicyProto" +
      "\022\014\n\004name\030\001 \001(\t\022*\n\006schema\030\002 \001(\0132\032.hadoop." +
      "hdfs.ECSchemaProto\022\020\n\010cellSize\030\003 \001(\r\022\n\n\002" +
      "id\030\004 \002(\r\022=\n\005state\030\005 \001(\0162%.hadoop.hdfs.Er" +
      "asureCodingPolicyState:\007ENABLED\"\177\n#AddEr" +
      "asureCodingPolicyResponseProto\0225\n\006policy" +
      "\030\001 \002(\0132%.hadoop.hdfs.ErasureCodingPolicy" +
      "Proto\022\017\n\007succeed\030\002 \002(\010\022\020\n\010errorMsg\030\003 \001(\t" +
      "\"K\n\035ECTopologyVerifierResultProto\022\025\n\rres" +
      "ultMessage\030\001 \002(\t\022\023\n\013isSupported\030\002 \002(\010\"C\n" +
      "\023HdfsPathHandleProto\022\017\n\007inodeId\030\001 \001(\004\022\r\n" +
      "\005mtime\030\002 \001(\004\022\014\n\004path\030\003 \001(\t\"\315\005\n\023HdfsFileS" +
      "tatusProto\022;\n\010fileType\030\001 \002(\0162).hadoop.hd" +
      "fs.HdfsFileStatusProto.FileType\022\014\n\004path\030" +
      "\002 \002(\014\022\016\n\006length\030\003 \002(\004\0222\n\npermission\030\004 \002(" +
      "\0132\036.hadoop.hdfs.FsPermissionProto\022\r\n\005own" +
      "er\030\005 \002(\t\022\r\n\005group\030\006 \002(\t\022\031\n\021modification_" +
      "time\030\007 \002(\004\022\023\n\013access_time\030\010 \002(\004\022\017\n\007symli" +
      "nk\030\t \001(\014\022\034\n\021block_replication\030\n \001(\r:\0010\022\024" +
      "\n\tblocksize\030\013 \001(\004:\0010\0222\n\tlocations\030\014 \001(\0132" +
      "\037.hadoop.hdfs.LocatedBlocksProto\022\021\n\006file" +
      "Id\030\r \001(\004:\0010\022\027\n\013childrenNum\030\016 \001(\005:\002-1\022@\n\022" +
      "fileEncryptionInfo\030\017 \001(\0132$.hadoop.hdfs.F" +
      "ileEncryptionInfoProto\022\030\n\rstoragePolicy\030" +
      "\020 \001(\r:\0010\0227\n\010ecPolicy\030\021 \001(\0132%.hadoop.hdfs" +
      ".ErasureCodingPolicyProto\022\020\n\005flags\030\022 \001(\r" +
      ":\0010\022\021\n\tnamespace\030\023 \001(\t\"3\n\010FileType\022\n\n\006IS" +
      "_DIR\020\001\022\013\n\007IS_FILE\020\002\022\016\n\nIS_SYMLINK\020\003\"E\n\005F" +
      "lags\022\013\n\007HAS_ACL\020\001\022\r\n\tHAS_CRYPT\020\002\022\n\n\006HAS_" +
      "EC\020\004\022\024\n\020SNAPSHOT_ENABLED\020\010\"y\n\031BlockCheck" +
      "sumOptionsProto\022F\n\021blockChecksumType\030\001 \001" +
      "(\0162#.hadoop.hdfs.BlockChecksumTypeProto:" +
      "\006MD5CRC\022\024\n\014stripeLength\030\002 \001(\004\"\344\002\n\025FsServ" +
      "erDefaultsProto\022\021\n\tblockSize\030\001 \002(\004\022\030\n\020by" +
      "tesPerChecksum\030\002 \002(\r\022\027\n\017writePacketSize\030" +
      "\003 \002(\r\022\023\n\013replication\030\004 \002(\r\022\026\n\016fileBuffer" +
      "Size\030\005 \002(\r\022\"\n\023encryptDataTransfer\030\006 \001(\010:" +
      "\005false\022\030\n\rtrashInterval\030\007 \001(\004:\0010\022D\n\014chec" +
      "ksumType\030\010 \001(\0162\036.hadoop.hdfs.ChecksumTyp" +
      "eProto:\016CHECKSUM_CRC32\022\026\n\016keyProviderUri" +
      "\030\t \001(\t\022\023\n\010policyId\030\n \001(\r:\0010\022\'\n\030snapshotT" +
      "rashRootEnabled\030\013 \001(\010:\005false\"k\n\025Director" +
      "yListingProto\0228\n\016partialListing\030\001 \003(\0132 ." +
      "hadoop.hdfs.HdfsFileStatusProto\022\030\n\020remai" +
      "ningEntries\030\002 \002(\r\":\n\024RemoteExceptionProt" +
      "o\022\021\n\tclassName\030\001 \002(\t\022\017\n\007message\030\002 \001(\t\"\241\001" +
      "\n\034BatchedDirectoryListingProto\0228\n\016partia" +
      "lListing\030\001 \003(\0132 .hadoop.hdfs.HdfsFileSta" +
      "tusProto\022\021\n\tparentIdx\030\002 \002(\r\0224\n\texception" +
      "\030\003 \001(\0132!.hadoop.hdfs.RemoteExceptionProt" +
      "o\"\242\001\n!SnapshottableDirectoryStatusProto\022" +
      "3\n\tdirStatus\030\001 \002(\0132 .hadoop.hdfs.HdfsFil" +
      "eStatusProto\022\026\n\016snapshot_quota\030\002 \002(\r\022\027\n\017" +
      "snapshot_number\030\003 \002(\r\022\027\n\017parent_fullpath" +
      "\030\004 \002(\014\"\212\001\n\023SnapshotStatusProto\0223\n\tdirSta" +
      "tus\030\001 \002(\0132 .hadoop.hdfs.HdfsFileStatusPr" +
      "oto\022\022\n\nsnapshotID\030\002 \002(\r\022\027\n\017parent_fullpa" +
      "th\030\003 \002(\014\022\021\n\tisDeleted\030\004 \002(\010\"u\n\"Snapshott" +
      "ableDirectoryListingProto\022O\n\027snapshottab" +
      "leDirListing\030\001 \003(\0132..hadoop.hdfs.Snapsho" +
      "ttableDirectoryStatusProto\"Q\n\024SnapshotLi" +
      "stingProto\0229\n\017snapshotListing\030\001 \003(\0132 .ha" +
      "doop.hdfs.SnapshotStatusProto\"_\n\034Snapsho" +
      "tDiffReportEntryProto\022\020\n\010fullpath\030\001 \002(\014\022" +
      "\031\n\021modificationLabel\030\002 \002(\t\022\022\n\ntargetPath" +
      "\030\003 \001(\014\"\237\001\n\027SnapshotDiffReportProto\022\024\n\014sn" +
      "apshotRoot\030\001 \002(\t\022\024\n\014fromSnapshot\030\002 \002(\t\022\022" +
      "\n\ntoSnapshot\030\003 \002(\t\022D\n\021diffReportEntries\030" +
      "\004 \003(\0132).hadoop.hdfs.SnapshotDiffReportEn" +
      "tryProto\"\177\n#SnapshotDiffReportListingEnt" +
      "ryProto\022\020\n\010fullpath\030\001 \002(\014\022\r\n\005dirId\030\002 \002(\004" +
      "\022\023\n\013isReference\030\003 \002(\010\022\022\n\ntargetPath\030\004 \001(" +
      "\014\022\016\n\006fileId\030\005 \001(\004\"E\n\035SnapshotDiffReportC" +
      "ursorProto\022\021\n\tstartPath\030\001 \002(\014\022\021\n\005index\030\002" +
      " \002(\005:\002-1\"\322\002\n\036SnapshotDiffReportListingPr" +
      "oto\022I\n\017modifiedEntries\030\001 \003(\01320.hadoop.hd" +
      "fs.SnapshotDiffReportListingEntryProto\022H" +
      "\n\016createdEntries\030\002 \003(\01320.hadoop.hdfs.Sna" +
      "pshotDiffReportListingEntryProto\022H\n\016dele" +
      "tedEntries\030\003 \003(\01320.hadoop.hdfs.SnapshotD" +
      "iffReportListingEntryProto\022\025\n\risFromEarl" +
      "ier\030\004 \002(\010\022:\n\006cursor\030\005 \001(\0132*.hadoop.hdfs." +
      "SnapshotDiffReportCursorProto\"D\n\nBlockPr" +
      "oto\022\017\n\007blockId\030\001 \002(\004\022\020\n\010genStamp\030\002 \002(\004\022\023" +
      "\n\010numBytes\030\003 \001(\004:\0010\"\245\001\n\021SnapshotInfoProt" +
      "o\022\024\n\014snapshotName\030\001 \002(\t\022\024\n\014snapshotRoot\030" +
      "\002 \002(\t\0222\n\npermission\030\003 \002(\0132\036.hadoop.hdfs." +
      "FsPermissionProto\022\r\n\005owner\030\004 \002(\t\022\r\n\005grou" +
      "p\030\005 \002(\t\022\022\n\ncreateTime\030\006 \002(\t\"J\n\031RollingUp" +
      "gradeStatusProto\022\023\n\013blockPoolId\030\001 \002(\t\022\030\n" +
      "\tfinalized\030\002 \001(\010:\005false\")\n\021StorageUuidsP" +
      "roto\022\024\n\014storageUuids\030\001 \003(\t\"\377\001\n\025BlockToke" +
      "nSecretProto\022\022\n\nexpiryDate\030\001 \001(\004\022\r\n\005keyI" +
      "d\030\002 \001(\r\022\016\n\006userId\030\003 \001(\t\022\023\n\013blockPoolId\030\004" +
      " \001(\t\022\017\n\007blockId\030\005 \001(\004\022+\n\005modes\030\006 \003(\0162\034.h" +
      "adoop.hdfs.AccessModeProto\0223\n\014storageTyp" +
      "es\030\007 \003(\0162\035.hadoop.hdfs.StorageTypeProto\022" +
      "\022\n\nstorageIds\030\010 \003(\t\022\027\n\017handshakeSecret\030\t" +
      " \001(\014\"\257\001\n\031RouterFederatedStateProto\022X\n\021na" +
      "mespaceStateIds\030\001 \003(\0132=.hadoop.hdfs.Rout" +
      "erFederatedStateProto.NamespaceStateIdsE" +
      "ntry\0328\n\026NamespaceStateIdsEntry\022\013\n\003key\030\001 " +
      "\001(\t\022\r\n\005value\030\002 \001(\003:\0028\001*Z\n\020StorageTypePro" +
      "to\022\010\n\004DISK\020\001\022\007\n\003SSD\020\002\022\013\n\007ARCHIVE\020\003\022\014\n\010RA" +
      "M_DISK\020\004\022\014\n\010PROVIDED\020\005\022\n\n\006NVDIMM\020\006*-\n\016Bl" +
      "ockTypeProto\022\016\n\nCONTIGUOUS\020\000\022\013\n\007STRIPED\020" +
      "\001*M\n\020CipherSuiteProto\022\013\n\007UNKNOWN\020\001\022\025\n\021AE" +
      "S_CTR_NOPADDING\020\002\022\025\n\021SM4_CTR_NOPADDING\020\003" +
      "*P\n\032CryptoProtocolVersionProto\022\034\n\030UNKNOW" +
      "N_PROTOCOL_VERSION\020\001\022\024\n\020ENCRYPTION_ZONES" +
      "\020\002*B\n\030ErasureCodingPolicyState\022\014\n\010DISABL" +
      "ED\020\001\022\013\n\007ENABLED\020\002\022\013\n\007REMOVED\020\003*O\n\021Checks" +
      "umTypeProto\022\021\n\rCHECKSUM_NULL\020\000\022\022\n\016CHECKS" +
      "UM_CRC32\020\001\022\023\n\017CHECKSUM_CRC32C\020\002*7\n\026Block" +
      "ChecksumTypeProto\022\n\n\006MD5CRC\020\001\022\021\n\rCOMPOSI" +
      "TE_CRC\020\002*=\n\017AccessModeProto\022\010\n\004READ\020\001\022\t\n" +
      "\005WRITE\020\002\022\010\n\004COPY\020\003\022\013\n\007REPLACE\020\004B6\n%org.a" +
      "pache.hadoop.hdfs.protocol.protoB\nHdfsPr" +
      "otos\240\001\001"
    };
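    // Build the FileDescriptor for hdfs.proto from the serialized descriptor
    // bytes above, resolving its dependencies on Security.proto and acl.proto.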
    descriptor = org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
      .internalBuildGeneratedFileFrom(descriptorData,
        new org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor[] {
          org.apache.hadoop.security.proto.SecurityProtos.getDescriptor(),
          org.apache.hadoop.hdfs.protocol.proto.AclProtos.getDescriptor(),
        });
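    // Each message type is then bound by its declaration index in hdfs.proto,
    // and a reflection FieldAccessorTable is built over its camel-cased field
    // names so generated accessors and reflective access stay consistent.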
    internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor =
      getDescriptor().getMessageTypes().get(0);
    internal_static_hadoop_hdfs_ExtendedBlockProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_ExtendedBlockProto_descriptor,
        new java.lang.String[] { "PoolId", "BlockId", "GenerationStamp", "NumBytes", });
    internal_static_hadoop_hdfs_ProvidedStorageLocationProto_descriptor =
      getDescriptor().getMessageTypes().get(1);
    internal_static_hadoop_hdfs_ProvidedStorageLocationProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_ProvidedStorageLocationProto_descriptor,
        new java.lang.String[] { "Path", "Offset", "Length", "Nonce", });
    internal_static_hadoop_hdfs_DatanodeIDProto_descriptor =
      getDescriptor().getMessageTypes().get(2);
    internal_static_hadoop_hdfs_DatanodeIDProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_DatanodeIDProto_descriptor,
        new java.lang.String[] { "IpAddr", "HostName", "DatanodeUuid", "XferPort", "InfoPort", "IpcPort", "InfoSecurePort", });
    internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor =
      getDescriptor().getMessageTypes().get(3);
    internal_static_hadoop_hdfs_DatanodeLocalInfoProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_DatanodeLocalInfoProto_descriptor,
        new java.lang.String[] { "SoftwareVersion", "ConfigVersion", "Uptime", });
    internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_descriptor =
      getDescriptor().getMessageTypes().get(4);
    internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_DatanodeVolumeInfoProto_descriptor,
        new java.lang.String[] { "Path", "StorageType", "UsedSpace", "FreeSpace", "ReservedSpace", "ReservedSpaceForReplicas", "NumBlocks", });
    internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor =
      getDescriptor().getMessageTypes().get(5);
    internal_static_hadoop_hdfs_DatanodeInfosProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_DatanodeInfosProto_descriptor,
        new java.lang.String[] { "Datanodes", });
    internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor =
      getDescriptor().getMessageTypes().get(6);
    internal_static_hadoop_hdfs_DatanodeInfoProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_DatanodeInfoProto_descriptor,
        new java.lang.String[] { "Id", "Capacity", "DfsUsed", "Remaining", "BlockPoolUsed", "LastUpdate", "XceiverCount", "Location", "NonDfsUsed", "AdminState", "CacheCapacity", "CacheUsed", "LastUpdateMonotonic", "UpgradeDomain", "LastBlockReportTime", "LastBlockReportMonotonic", "NumBlocks", "SoftwareVersion", });
    internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor =
      getDescriptor().getMessageTypes().get(7);
    internal_static_hadoop_hdfs_DatanodeStorageProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_DatanodeStorageProto_descriptor,
        new java.lang.String[] { "StorageUuid", "State", "StorageType", });
    internal_static_hadoop_hdfs_StorageReportProto_descriptor =
      getDescriptor().getMessageTypes().get(8);
    internal_static_hadoop_hdfs_StorageReportProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_StorageReportProto_descriptor,
        new java.lang.String[] { "StorageUuid", "Failed", "Capacity", "DfsUsed", "Remaining", "BlockPoolUsed", "Storage", "NonDfsUsed", "Mount", });
    internal_static_hadoop_hdfs_ContentSummaryProto_descriptor =
      getDescriptor().getMessageTypes().get(9);
    internal_static_hadoop_hdfs_ContentSummaryProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_ContentSummaryProto_descriptor,
        new java.lang.String[] { "Length", "FileCount", "DirectoryCount", "Quota", "SpaceConsumed", "SpaceQuota", "TypeQuotaInfos", "SnapshotLength", "SnapshotFileCount", "SnapshotDirectoryCount", "SnapshotSpaceConsumed", "ErasureCodingPolicy", });
    internal_static_hadoop_hdfs_QuotaUsageProto_descriptor =
      getDescriptor().getMessageTypes().get(10);
    internal_static_hadoop_hdfs_QuotaUsageProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_QuotaUsageProto_descriptor,
        new java.lang.String[] { "FileAndDirectoryCount", "Quota", "SpaceConsumed", "SpaceQuota", "TypeQuotaInfos", });
    internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor =
      getDescriptor().getMessageTypes().get(11);
    internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_StorageTypeQuotaInfosProto_descriptor,
        new java.lang.String[] { "TypeQuotaInfo", });
    internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor =
      getDescriptor().getMessageTypes().get(12);
    internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_StorageTypeQuotaInfoProto_descriptor,
        new java.lang.String[] { "Type", "Quota", "Consumed", });
    internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor =
      getDescriptor().getMessageTypes().get(13);
    internal_static_hadoop_hdfs_CorruptFileBlocksProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_CorruptFileBlocksProto_descriptor,
        new java.lang.String[] { "Files", "Cookie", });
    internal_static_hadoop_hdfs_StorageTypesProto_descriptor =
      getDescriptor().getMessageTypes().get(14);
    internal_static_hadoop_hdfs_StorageTypesProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_StorageTypesProto_descriptor,
        new java.lang.String[] { "StorageTypes", });
    internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor =
      getDescriptor().getMessageTypes().get(15);
    internal_static_hadoop_hdfs_BlockStoragePolicyProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_BlockStoragePolicyProto_descriptor,
        new java.lang.String[] { "PolicyId", "Name", "CreationPolicy", "CreationFallbackPolicy", "ReplicationFallbackPolicy", });
    internal_static_hadoop_hdfs_LocatedBlockProto_descriptor =
      getDescriptor().getMessageTypes().get(16);
    internal_static_hadoop_hdfs_LocatedBlockProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_LocatedBlockProto_descriptor,
        new java.lang.String[] { "B", "Offset", "Locs", "Corrupt", "BlockToken", "IsCached", "StorageTypes", "StorageIDs", "BlockIndices", "BlockTokens", });
    internal_static_hadoop_hdfs_BatchedListingKeyProto_descriptor =
      getDescriptor().getMessageTypes().get(17);
    internal_static_hadoop_hdfs_BatchedListingKeyProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_BatchedListingKeyProto_descriptor,
        new java.lang.String[] { "Checksum", "PathIndex", "StartAfter", });
    internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor =
      getDescriptor().getMessageTypes().get(18);
    internal_static_hadoop_hdfs_DataEncryptionKeyProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_DataEncryptionKeyProto_descriptor,
        new java.lang.String[] { "KeyId", "BlockPoolId", "Nonce", "EncryptionKey", "ExpiryDate", "EncryptionAlgorithm", });
    internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor =
      getDescriptor().getMessageTypes().get(19);
    internal_static_hadoop_hdfs_FileEncryptionInfoProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_FileEncryptionInfoProto_descriptor,
        new java.lang.String[] { "Suite", "CryptoProtocolVersion", "Key", "Iv", "KeyName", "EzKeyVersionName", });
    internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor =
      getDescriptor().getMessageTypes().get(20);
    internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_PerFileEncryptionInfoProto_descriptor,
        new java.lang.String[] { "Key", "Iv", "EzKeyVersionName", });
    internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor =
      getDescriptor().getMessageTypes().get(21);
    internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_ZoneEncryptionInfoProto_descriptor,
        new java.lang.String[] { "Suite", "CryptoProtocolVersion", "KeyName", "ReencryptionProto", });
    internal_static_hadoop_hdfs_ReencryptionInfoProto_descriptor =
      getDescriptor().getMessageTypes().get(22);
    internal_static_hadoop_hdfs_ReencryptionInfoProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_ReencryptionInfoProto_descriptor,
        new java.lang.String[] { "EzKeyVersionName", "SubmissionTime", "Canceled", "NumReencrypted", "NumFailures", "CompletionTime", "LastFile", });
    internal_static_hadoop_hdfs_CipherOptionProto_descriptor =
      getDescriptor().getMessageTypes().get(23);
    internal_static_hadoop_hdfs_CipherOptionProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_CipherOptionProto_descriptor,
        new java.lang.String[] { "Suite", "InKey", "InIv", "OutKey", "OutIv", });
    internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor =
      getDescriptor().getMessageTypes().get(24);
    internal_static_hadoop_hdfs_LocatedBlocksProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_LocatedBlocksProto_descriptor,
        new java.lang.String[] { "FileLength", "Blocks", "UnderConstruction", "LastBlock", "IsLastBlockComplete", "FileEncryptionInfo", "EcPolicy", });
    internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_descriptor =
      getDescriptor().getMessageTypes().get(25);
    internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_ECSchemaOptionEntryProto_descriptor,
        new java.lang.String[] { "Key", "Value", });
    internal_static_hadoop_hdfs_ECSchemaProto_descriptor =
      getDescriptor().getMessageTypes().get(26);
    internal_static_hadoop_hdfs_ECSchemaProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_ECSchemaProto_descriptor,
        new java.lang.String[] { "CodecName", "DataUnits", "ParityUnits", "Options", });
    internal_static_hadoop_hdfs_ErasureCodingPolicyProto_descriptor =
      getDescriptor().getMessageTypes().get(27);
    internal_static_hadoop_hdfs_ErasureCodingPolicyProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_ErasureCodingPolicyProto_descriptor,
        new java.lang.String[] { "Name", "Schema", "CellSize", "Id", "State", });
    internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(28);
    internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_AddErasureCodingPolicyResponseProto_descriptor,
        new java.lang.String[] { "Policy", "Succeed", "ErrorMsg", });
    internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_descriptor =
      getDescriptor().getMessageTypes().get(29);
    internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_ECTopologyVerifierResultProto_descriptor,
        new java.lang.String[] { "ResultMessage", "IsSupported", });
    internal_static_hadoop_hdfs_HdfsPathHandleProto_descriptor =
      getDescriptor().getMessageTypes().get(30);
    internal_static_hadoop_hdfs_HdfsPathHandleProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_HdfsPathHandleProto_descriptor,
        new java.lang.String[] { "InodeId", "Mtime", "Path", });
    internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor =
      getDescriptor().getMessageTypes().get(31);
    internal_static_hadoop_hdfs_HdfsFileStatusProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_HdfsFileStatusProto_descriptor,
        new java.lang.String[] { "FileType", "Path", "Length", "Permission", "Owner", "Group", "ModificationTime", "AccessTime", "Symlink", "BlockReplication", "Blocksize", "Locations", "FileId", "ChildrenNum", "FileEncryptionInfo", "StoragePolicy", "EcPolicy", "Flags", "Namespace", });
    internal_static_hadoop_hdfs_BlockChecksumOptionsProto_descriptor =
      getDescriptor().getMessageTypes().get(32);
    internal_static_hadoop_hdfs_BlockChecksumOptionsProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_BlockChecksumOptionsProto_descriptor,
        new java.lang.String[] { "BlockChecksumType", "StripeLength", });
    internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor =
      getDescriptor().getMessageTypes().get(33);
    internal_static_hadoop_hdfs_FsServerDefaultsProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_FsServerDefaultsProto_descriptor,
        new java.lang.String[] { "BlockSize", "BytesPerChecksum", "WritePacketSize", "Replication", "FileBufferSize", "EncryptDataTransfer", "TrashInterval", "ChecksumType", "KeyProviderUri", "PolicyId", "SnapshotTrashRootEnabled", });
    internal_static_hadoop_hdfs_DirectoryListingProto_descriptor =
      getDescriptor().getMessageTypes().get(34);
    internal_static_hadoop_hdfs_DirectoryListingProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_DirectoryListingProto_descriptor,
        new java.lang.String[] { "PartialListing", "RemainingEntries", });
    internal_static_hadoop_hdfs_RemoteExceptionProto_descriptor =
      getDescriptor().getMessageTypes().get(35);
    internal_static_hadoop_hdfs_RemoteExceptionProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_RemoteExceptionProto_descriptor,
        new java.lang.String[] { "ClassName", "Message", });
    internal_static_hadoop_hdfs_BatchedDirectoryListingProto_descriptor =
      getDescriptor().getMessageTypes().get(36);
    internal_static_hadoop_hdfs_BatchedDirectoryListingProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_BatchedDirectoryListingProto_descriptor,
        new java.lang.String[] { "PartialListing", "ParentIdx", "Exception", });
    internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor =
      getDescriptor().getMessageTypes().get(37);
    internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_SnapshottableDirectoryStatusProto_descriptor,
        new java.lang.String[] { "DirStatus", "SnapshotQuota", "SnapshotNumber", "ParentFullpath", });
    internal_static_hadoop_hdfs_SnapshotStatusProto_descriptor =
      getDescriptor().getMessageTypes().get(38);
    internal_static_hadoop_hdfs_SnapshotStatusProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_SnapshotStatusProto_descriptor,
        new java.lang.String[] { "DirStatus", "SnapshotID", "ParentFullpath", "IsDeleted", });
    internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor =
      getDescriptor().getMessageTypes().get(39);
    internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_SnapshottableDirectoryListingProto_descriptor,
        new java.lang.String[] { "SnapshottableDirListing", });
    internal_static_hadoop_hdfs_SnapshotListingProto_descriptor =
      getDescriptor().getMessageTypes().get(40);
    internal_static_hadoop_hdfs_SnapshotListingProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_SnapshotListingProto_descriptor,
        new java.lang.String[] { "SnapshotListing", });
    internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor =
      getDescriptor().getMessageTypes().get(41);
    internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_SnapshotDiffReportEntryProto_descriptor,
        new java.lang.String[] { "Fullpath", "ModificationLabel", "TargetPath", });
    internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor =
      getDescriptor().getMessageTypes().get(42);
    internal_static_hadoop_hdfs_SnapshotDiffReportProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_SnapshotDiffReportProto_descriptor,
        new java.lang.String[] { "SnapshotRoot", "FromSnapshot", "ToSnapshot", "DiffReportEntries", });
    internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_descriptor =
      getDescriptor().getMessageTypes().get(43);
    internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_SnapshotDiffReportListingEntryProto_descriptor,
        new java.lang.String[] { "Fullpath", "DirId", "IsReference", "TargetPath", "FileId", });
    internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_descriptor =
      getDescriptor().getMessageTypes().get(44);
    internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_SnapshotDiffReportCursorProto_descriptor,
        new java.lang.String[] { "StartPath", "Index", });
    internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_descriptor =
      getDescriptor().getMessageTypes().get(45);
    internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_SnapshotDiffReportListingProto_descriptor,
        new java.lang.String[] { "ModifiedEntries", "CreatedEntries", "DeletedEntries", "IsFromEarlier", "Cursor", });
    internal_static_hadoop_hdfs_BlockProto_descriptor =
      getDescriptor().getMessageTypes().get(46);
    internal_static_hadoop_hdfs_BlockProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_BlockProto_descriptor,
        new java.lang.String[] { "BlockId", "GenStamp", "NumBytes", });
    internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor =
      getDescriptor().getMessageTypes().get(47);
    internal_static_hadoop_hdfs_SnapshotInfoProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_SnapshotInfoProto_descriptor,
        new java.lang.String[] { "SnapshotName", "SnapshotRoot", "Permission", "Owner", "Group", "CreateTime", });
    internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor =
      getDescriptor().getMessageTypes().get(48);
    internal_static_hadoop_hdfs_RollingUpgradeStatusProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_RollingUpgradeStatusProto_descriptor,
        new java.lang.String[] { "BlockPoolId", "Finalized", });
    internal_static_hadoop_hdfs_StorageUuidsProto_descriptor =
      getDescriptor().getMessageTypes().get(49);
    internal_static_hadoop_hdfs_StorageUuidsProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_StorageUuidsProto_descriptor,
        new java.lang.String[] { "StorageUuids", });
    internal_static_hadoop_hdfs_BlockTokenSecretProto_descriptor =
      getDescriptor().getMessageTypes().get(50);
    internal_static_hadoop_hdfs_BlockTokenSecretProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_BlockTokenSecretProto_descriptor,
        new java.lang.String[] { "ExpiryDate", "KeyId", "UserId", "BlockPoolId", "BlockId", "Modes", "StorageTypes", "StorageIds", "HandshakeSecret", });
    internal_static_hadoop_hdfs_RouterFederatedStateProto_descriptor =
      getDescriptor().getMessageTypes().get(51);
    internal_static_hadoop_hdfs_RouterFederatedStateProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_RouterFederatedStateProto_descriptor,
        new java.lang.String[] { "NamespaceStateIds", });
    internal_static_hadoop_hdfs_RouterFederatedStateProto_NamespaceStateIdsEntry_descriptor =
      internal_static_hadoop_hdfs_RouterFederatedStateProto_descriptor.getNestedTypes().get(0);
    internal_static_hadoop_hdfs_RouterFederatedStateProto_NamespaceStateIdsEntry_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_RouterFederatedStateProto_NamespaceStateIdsEntry_descriptor,
        new java.lang.String[] { "Key", "Value", });
    org.apache.hadoop.security.proto.SecurityProtos.getDescriptor();
    org.apache.hadoop.hdfs.protocol.proto.AclProtos.getDescriptor();
  }

  // @@protoc_insertion_point(outer_class_scope)
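  // Illustrative usage sketch (assumed client code, not part of the generated
  // output): the file descriptor built above can be queried reflectively, e.g.
  //
  //   org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor block =
  //       HdfsProtos.getDescriptor().findMessageTypeByName("BlockProto");
  //   // block.getFields() then lists blockId, genStamp and numBytes,
  //   // matching the BlockProto accessor table registered above.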
}