// DataTransferProtos.java

// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: datatransfer.proto

// Protobuf Java Version: 3.25.5
package org.apache.hadoop.hdfs.protocol.proto;

public final class DataTransferProtos {
  private DataTransferProtos() {}
  public static void registerAllExtensions(
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite registry) {
  }

  public static void registerAllExtensions(
      org.apache.hadoop.thirdparty.protobuf.ExtensionRegistry registry) {
    registerAllExtensions(
        (org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite) registry);
  }
  /**
   * <pre>
   * Status is a 4-bit enum
   * </pre>
   *
   * Protobuf enum {@code hadoop.hdfs.Status}
   */
  public enum Status
      implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
    /**
     * <code>SUCCESS = 0;</code>
     */
    SUCCESS(0),
    /**
     * <code>ERROR = 1;</code>
     */
    ERROR(1),
    /**
     * <code>ERROR_CHECKSUM = 2;</code>
     */
    ERROR_CHECKSUM(2),
    /**
     * <code>ERROR_INVALID = 3;</code>
     */
    ERROR_INVALID(3),
    /**
     * <code>ERROR_EXISTS = 4;</code>
     */
    ERROR_EXISTS(4),
    /**
     * <code>ERROR_ACCESS_TOKEN = 5;</code>
     */
    ERROR_ACCESS_TOKEN(5),
    /**
     * <code>CHECKSUM_OK = 6;</code>
     */
    CHECKSUM_OK(6),
    /**
     * <code>ERROR_UNSUPPORTED = 7;</code>
     */
    ERROR_UNSUPPORTED(7),
    /**
     * <pre>
     * Quick restart
     * </pre>
     *
     * <code>OOB_RESTART = 8;</code>
     */
    OOB_RESTART(8),
    /**
     * <pre>
     * Reserved
     * </pre>
     *
     * <code>OOB_RESERVED1 = 9;</code>
     */
    OOB_RESERVED1(9),
    /**
     * <pre>
     * Reserved
     * </pre>
     *
     * <code>OOB_RESERVED2 = 10;</code>
     */
    OOB_RESERVED2(10),
    /**
     * <pre>
     * Reserved
     * </pre>
     *
     * <code>OOB_RESERVED3 = 11;</code>
     */
    OOB_RESERVED3(11),
    /**
     * <code>IN_PROGRESS = 12;</code>
     */
    IN_PROGRESS(12),
    /**
     * <code>ERROR_BLOCK_PINNED = 13;</code>
     */
    ERROR_BLOCK_PINNED(13),
    ;

    /**
     * <code>SUCCESS = 0;</code>
     */
    public static final int SUCCESS_VALUE = 0;
    /**
     * <code>ERROR = 1;</code>
     */
    public static final int ERROR_VALUE = 1;
    /**
     * <code>ERROR_CHECKSUM = 2;</code>
     */
    public static final int ERROR_CHECKSUM_VALUE = 2;
    /**
     * <code>ERROR_INVALID = 3;</code>
     */
    public static final int ERROR_INVALID_VALUE = 3;
    /**
     * <code>ERROR_EXISTS = 4;</code>
     */
    public static final int ERROR_EXISTS_VALUE = 4;
    /**
     * <code>ERROR_ACCESS_TOKEN = 5;</code>
     */
    public static final int ERROR_ACCESS_TOKEN_VALUE = 5;
    /**
     * <code>CHECKSUM_OK = 6;</code>
     */
    public static final int CHECKSUM_OK_VALUE = 6;
    /**
     * <code>ERROR_UNSUPPORTED = 7;</code>
     */
    public static final int ERROR_UNSUPPORTED_VALUE = 7;
    /**
     * <pre>
     * Quick restart
     * </pre>
     *
     * <code>OOB_RESTART = 8;</code>
     */
    public static final int OOB_RESTART_VALUE = 8;
    /**
     * <pre>
     * Reserved
     * </pre>
     *
     * <code>OOB_RESERVED1 = 9;</code>
     */
    public static final int OOB_RESERVED1_VALUE = 9;
    /**
     * <pre>
     * Reserved
     * </pre>
     *
     * <code>OOB_RESERVED2 = 10;</code>
     */
    public static final int OOB_RESERVED2_VALUE = 10;
    /**
     * <pre>
     * Reserved
     * </pre>
     *
     * <code>OOB_RESERVED3 = 11;</code>
     */
    public static final int OOB_RESERVED3_VALUE = 11;
    /**
     * <code>IN_PROGRESS = 12;</code>
     */
    public static final int IN_PROGRESS_VALUE = 12;
    /**
     * <code>ERROR_BLOCK_PINNED = 13;</code>
     */
    public static final int ERROR_BLOCK_PINNED_VALUE = 13;


    public final int getNumber() {
      return value;
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     * @deprecated Use {@link #forNumber(int)} instead.
     */
    @java.lang.Deprecated
    public static Status valueOf(int value) {
      return forNumber(value);
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     */
    public static Status forNumber(int value) {
      switch (value) {
        case 0: return SUCCESS;
        case 1: return ERROR;
        case 2: return ERROR_CHECKSUM;
        case 3: return ERROR_INVALID;
        case 4: return ERROR_EXISTS;
        case 5: return ERROR_ACCESS_TOKEN;
        case 6: return CHECKSUM_OK;
        case 7: return ERROR_UNSUPPORTED;
        case 8: return OOB_RESTART;
        case 9: return OOB_RESERVED1;
        case 10: return OOB_RESERVED2;
        case 11: return OOB_RESERVED3;
        case 12: return IN_PROGRESS;
        case 13: return ERROR_BLOCK_PINNED;
        default: return null;
      }
    }

    public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<Status>
        internalGetValueMap() {
      return internalValueMap;
    }
    private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
        Status> internalValueMap =
          new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<Status>() {
            public Status findValueByNumber(int number) {
              return Status.forNumber(number);
            }
          };

    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
        getValueDescriptor() {
      return getDescriptor().getValues().get(ordinal());
    }
    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.getDescriptor().getEnumTypes().get(0);
    }

    private static final Status[] VALUES = values();

    public static Status valueOf(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
      if (desc.getType() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
      }
      return VALUES[desc.getIndex()];
    }

    private final int value;

    private Status(int value) {
      this.value = value;
    }

    // @@protoc_insertion_point(enum_scope:hadoop.hdfs.Status)
  }
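
  // A minimal usage sketch: each Status constant maps to its numeric wire
  // value via getNumber(), and forNumber(int) performs the reverse lookup,
  // returning null for numbers not defined above, so callers decoding raw
  // wire data are expected to handle the null case.
  //
  //   int wire = Status.ERROR_CHECKSUM.getNumber();   // 2
  //   Status decoded = Status.forNumber(wire);        // Status.ERROR_CHECKSUM
  //   Status unknown = Status.forNumber(99);          // null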

  /**
   * Protobuf enum {@code hadoop.hdfs.ShortCircuitFdResponse}
   */
  public enum ShortCircuitFdResponse
      implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
    /**
     * <code>DO_NOT_USE_RECEIPT_VERIFICATION = 0;</code>
     */
    DO_NOT_USE_RECEIPT_VERIFICATION(0),
    /**
     * <code>USE_RECEIPT_VERIFICATION = 1;</code>
     */
    USE_RECEIPT_VERIFICATION(1),
    ;

    /**
     * <code>DO_NOT_USE_RECEIPT_VERIFICATION = 0;</code>
     */
    public static final int DO_NOT_USE_RECEIPT_VERIFICATION_VALUE = 0;
    /**
     * <code>USE_RECEIPT_VERIFICATION = 1;</code>
     */
    public static final int USE_RECEIPT_VERIFICATION_VALUE = 1;


    public final int getNumber() {
      return value;
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     * @deprecated Use {@link #forNumber(int)} instead.
     */
    @java.lang.Deprecated
    public static ShortCircuitFdResponse valueOf(int value) {
      return forNumber(value);
    }

    /**
     * @param value The numeric wire value of the corresponding enum entry.
     * @return The enum associated with the given numeric wire value.
     */
    public static ShortCircuitFdResponse forNumber(int value) {
      switch (value) {
        case 0: return DO_NOT_USE_RECEIPT_VERIFICATION;
        case 1: return USE_RECEIPT_VERIFICATION;
        default: return null;
      }
    }

    public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<ShortCircuitFdResponse>
        internalGetValueMap() {
      return internalValueMap;
    }
    private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
        ShortCircuitFdResponse> internalValueMap =
          new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<ShortCircuitFdResponse>() {
            public ShortCircuitFdResponse findValueByNumber(int number) {
              return ShortCircuitFdResponse.forNumber(number);
            }
          };

    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
        getValueDescriptor() {
      return getDescriptor().getValues().get(ordinal());
    }
    public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }
    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.getDescriptor().getEnumTypes().get(1);
    }

    private static final ShortCircuitFdResponse[] VALUES = values();

    public static ShortCircuitFdResponse valueOf(
        org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
      if (desc.getType() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "EnumValueDescriptor is not for this type.");
      }
      return VALUES[desc.getIndex()];
    }

    private final int value;

    private ShortCircuitFdResponse(int value) {
      this.value = value;
    }

    // @@protoc_insertion_point(enum_scope:hadoop.hdfs.ShortCircuitFdResponse)
  }

  public interface DataTransferEncryptorMessageProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DataTransferEncryptorMessageProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;</code>
     * @return Whether the status field is set.
     */
    boolean hasStatus();
    /**
     * <code>required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;</code>
     * @return The status.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus getStatus();

    /**
     * <code>optional bytes payload = 2;</code>
     * @return Whether the payload field is set.
     */
    boolean hasPayload();
    /**
     * <code>optional bytes payload = 2;</code>
     * @return The payload.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getPayload();

    /**
     * <code>optional string message = 3;</code>
     * @return Whether the message field is set.
     */
    boolean hasMessage();
    /**
     * <code>optional string message = 3;</code>
     * @return The message.
     */
    java.lang.String getMessage();
    /**
     * <code>optional string message = 3;</code>
     * @return The bytes for message.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getMessageBytes();

    /**
     * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto> 
        getCipherOptionList();
    /**
     * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto getCipherOption(int index);
    /**
     * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
     */
    int getCipherOptionCount();
    /**
     * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder> 
        getCipherOptionOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder getCipherOptionOrBuilder(
        int index);

    /**
     * <code>optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;</code>
     * @return Whether the handshakeSecret field is set.
     */
    boolean hasHandshakeSecret();
    /**
     * <code>optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;</code>
     * @return The handshakeSecret.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto getHandshakeSecret();
    /**
     * <code>optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProtoOrBuilder getHandshakeSecretOrBuilder();

    /**
     * <code>optional bool accessTokenError = 6;</code>
     * @return Whether the accessTokenError field is set.
     */
    boolean hasAccessTokenError();
    /**
     * <code>optional bool accessTokenError = 6;</code>
     * @return The accessTokenError.
     */
    boolean getAccessTokenError();
  }
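
  // A minimal sketch of why the OrBuilder interface is useful: read-only code
  // can accept either a built DataTransferEncryptorMessageProto or a Builder
  // that is still being populated. reportsAccessTokenError below is a
  // hypothetical caller, shown only for illustration.
  //
  //   static boolean reportsAccessTokenError(DataTransferEncryptorMessageProtoOrBuilder m) {
  //     return m.hasAccessTokenError() && m.getAccessTokenError();
  //   }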
  /**
   * Protobuf type {@code hadoop.hdfs.DataTransferEncryptorMessageProto}
   */
  public static final class DataTransferEncryptorMessageProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.DataTransferEncryptorMessageProto)
      DataTransferEncryptorMessageProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use DataTransferEncryptorMessageProto.newBuilder() to construct.
    private DataTransferEncryptorMessageProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private DataTransferEncryptorMessageProto() {
      status_ = 0;
      payload_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      message_ = "";
      cipherOption_ = java.util.Collections.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new DataTransferEncryptorMessageProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.Builder.class);
    }

    /**
     * Protobuf enum {@code hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus}
     */
    public enum DataTransferEncryptorStatus
        implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
      /**
       * <code>SUCCESS = 0;</code>
       */
      SUCCESS(0),
      /**
       * <code>ERROR_UNKNOWN_KEY = 1;</code>
       */
      ERROR_UNKNOWN_KEY(1),
      /**
       * <code>ERROR = 2;</code>
       */
      ERROR(2),
      ;

      /**
       * <code>SUCCESS = 0;</code>
       */
      public static final int SUCCESS_VALUE = 0;
      /**
       * <code>ERROR_UNKNOWN_KEY = 1;</code>
       */
      public static final int ERROR_UNKNOWN_KEY_VALUE = 1;
      /**
       * <code>ERROR = 2;</code>
       */
      public static final int ERROR_VALUE = 2;


      public final int getNumber() {
        return value;
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       * @deprecated Use {@link #forNumber(int)} instead.
       */
      @java.lang.Deprecated
      public static DataTransferEncryptorStatus valueOf(int value) {
        return forNumber(value);
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       */
      public static DataTransferEncryptorStatus forNumber(int value) {
        switch (value) {
          case 0: return SUCCESS;
          case 1: return ERROR_UNKNOWN_KEY;
          case 2: return ERROR;
          default: return null;
        }
      }

      public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<DataTransferEncryptorStatus>
          internalGetValueMap() {
        return internalValueMap;
      }
      private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
          DataTransferEncryptorStatus> internalValueMap =
            new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<DataTransferEncryptorStatus>() {
              public DataTransferEncryptorStatus findValueByNumber(int number) {
                return DataTransferEncryptorStatus.forNumber(number);
              }
            };

      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
          getValueDescriptor() {
        return getDescriptor().getValues().get(ordinal());
      }
      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptorForType() {
        return getDescriptor();
      }
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.getDescriptor().getEnumTypes().get(0);
      }

      private static final DataTransferEncryptorStatus[] VALUES = values();

      public static DataTransferEncryptorStatus valueOf(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
        if (desc.getType() != getDescriptor()) {
          throw new java.lang.IllegalArgumentException(
            "EnumValueDescriptor is not for this type.");
        }
        return VALUES[desc.getIndex()];
      }

      private final int value;

      private DataTransferEncryptorStatus(int value) {
        this.value = value;
      }

      // @@protoc_insertion_point(enum_scope:hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus)
    }

    private int bitField0_;
    public static final int STATUS_FIELD_NUMBER = 1;
    private int status_ = 0;
    /**
     * <code>required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;</code>
     * @return Whether the status field is set.
     */
    @java.lang.Override public boolean hasStatus() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;</code>
     * @return The status.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus getStatus() {
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus.forNumber(status_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus.SUCCESS : result;
    }

    public static final int PAYLOAD_FIELD_NUMBER = 2;
    private org.apache.hadoop.thirdparty.protobuf.ByteString payload_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>optional bytes payload = 2;</code>
     * @return Whether the payload field is set.
     */
    @java.lang.Override
    public boolean hasPayload() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional bytes payload = 2;</code>
     * @return The payload.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getPayload() {
      return payload_;
    }

    public static final int MESSAGE_FIELD_NUMBER = 3;
    @SuppressWarnings("serial")
    private volatile java.lang.Object message_ = "";
    /**
     * <code>optional string message = 3;</code>
     * @return Whether the message field is set.
     */
    @java.lang.Override
    public boolean hasMessage() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional string message = 3;</code>
     * @return The message.
     */
    @java.lang.Override
    public java.lang.String getMessage() {
      java.lang.Object ref = message_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          message_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string message = 3;</code>
     * @return The bytes for message.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getMessageBytes() {
      java.lang.Object ref = message_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        message_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int CIPHEROPTION_FIELD_NUMBER = 4;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto> cipherOption_;
    /**
     * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto> getCipherOptionList() {
      return cipherOption_;
    }
    /**
     * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder> 
        getCipherOptionOrBuilderList() {
      return cipherOption_;
    }
    /**
     * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
     */
    @java.lang.Override
    public int getCipherOptionCount() {
      return cipherOption_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto getCipherOption(int index) {
      return cipherOption_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder getCipherOptionOrBuilder(
        int index) {
      return cipherOption_.get(index);
    }

    public static final int HANDSHAKESECRET_FIELD_NUMBER = 5;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto handshakeSecret_;
    /**
     * <code>optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;</code>
     * @return Whether the handshakeSecret field is set.
     */
    @java.lang.Override
    public boolean hasHandshakeSecret() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;</code>
     * @return The handshakeSecret.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto getHandshakeSecret() {
      return handshakeSecret_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.getDefaultInstance() : handshakeSecret_;
    }
    /**
     * <code>optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProtoOrBuilder getHandshakeSecretOrBuilder() {
      return handshakeSecret_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.getDefaultInstance() : handshakeSecret_;
    }

    public static final int ACCESSTOKENERROR_FIELD_NUMBER = 6;
    private boolean accessTokenError_ = false;
    /**
     * <code>optional bool accessTokenError = 6;</code>
     * @return Whether the accessTokenError field is set.
     */
    @java.lang.Override
    public boolean hasAccessTokenError() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>optional bool accessTokenError = 6;</code>
     * @return The accessTokenError.
     */
    @java.lang.Override
    public boolean getAccessTokenError() {
      return accessTokenError_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasStatus()) {
        memoizedIsInitialized = 0;
        return false;
      }
      for (int i = 0; i < getCipherOptionCount(); i++) {
        if (!getCipherOption(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasHandshakeSecret()) {
        if (!getHandshakeSecret().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeEnum(1, status_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeBytes(2, payload_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 3, message_);
      }
      for (int i = 0; i < cipherOption_.size(); i++) {
        output.writeMessage(4, cipherOption_.get(i));
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeMessage(5, getHandshakeSecret());
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeBool(6, accessTokenError_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(1, status_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(2, payload_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(3, message_);
      }
      for (int i = 0; i < cipherOption_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(4, cipherOption_.get(i));
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(5, getHandshakeSecret());
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(6, accessTokenError_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto) obj;

      if (hasStatus() != other.hasStatus()) return false;
      if (hasStatus()) {
        if (status_ != other.status_) return false;
      }
      if (hasPayload() != other.hasPayload()) return false;
      if (hasPayload()) {
        if (!getPayload()
            .equals(other.getPayload())) return false;
      }
      if (hasMessage() != other.hasMessage()) return false;
      if (hasMessage()) {
        if (!getMessage()
            .equals(other.getMessage())) return false;
      }
      if (!getCipherOptionList()
          .equals(other.getCipherOptionList())) return false;
      if (hasHandshakeSecret() != other.hasHandshakeSecret()) return false;
      if (hasHandshakeSecret()) {
        if (!getHandshakeSecret()
            .equals(other.getHandshakeSecret())) return false;
      }
      if (hasAccessTokenError() != other.hasAccessTokenError()) return false;
      if (hasAccessTokenError()) {
        if (getAccessTokenError()
            != other.getAccessTokenError()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasStatus()) {
        hash = (37 * hash) + STATUS_FIELD_NUMBER;
        hash = (53 * hash) + status_;
      }
      if (hasPayload()) {
        hash = (37 * hash) + PAYLOAD_FIELD_NUMBER;
        hash = (53 * hash) + getPayload().hashCode();
      }
      if (hasMessage()) {
        hash = (37 * hash) + MESSAGE_FIELD_NUMBER;
        hash = (53 * hash) + getMessage().hashCode();
      }
      if (getCipherOptionCount() > 0) {
        hash = (37 * hash) + CIPHEROPTION_FIELD_NUMBER;
        hash = (53 * hash) + getCipherOptionList().hashCode();
      }
      if (hasHandshakeSecret()) {
        hash = (37 * hash) + HANDSHAKESECRET_FIELD_NUMBER;
        hash = (53 * hash) + getHandshakeSecret().hashCode();
      }
      if (hasAccessTokenError()) {
        hash = (37 * hash) + ACCESSTOKENERROR_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getAccessTokenError());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
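
    // A minimal round-trip sketch: build a message with the required status
    // field, serialize it, and parse it back. toByteArray() is assumed to be
    // inherited from the protobuf message base type; everything else shown is
    // defined in this class.
    //
    //   DataTransferEncryptorMessageProto msg = newBuilder()
    //       .setStatus(DataTransferEncryptorStatus.SUCCESS)
    //       .setPayload(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("token"))
    //       .build();
    //   byte[] bytes = msg.toByteArray();
    //   DataTransferEncryptorMessageProto parsed = parseFrom(bytes);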

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.DataTransferEncryptorMessageProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DataTransferEncryptorMessageProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getCipherOptionFieldBuilder();
          getHandshakeSecretFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        status_ = 0;
        payload_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        message_ = "";
        if (cipherOptionBuilder_ == null) {
          cipherOption_ = java.util.Collections.emptyList();
        } else {
          cipherOption_ = null;
          cipherOptionBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000008);
        handshakeSecret_ = null;
        if (handshakeSecretBuilder_ != null) {
          handshakeSecretBuilder_.dispose();
          handshakeSecretBuilder_ = null;
        }
        accessTokenError_ = false;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto result) {
        if (cipherOptionBuilder_ == null) {
          if (((bitField0_ & 0x00000008) != 0)) {
            cipherOption_ = java.util.Collections.unmodifiableList(cipherOption_);
            bitField0_ = (bitField0_ & ~0x00000008);
          }
          result.cipherOption_ = cipherOption_;
        } else {
          result.cipherOption_ = cipherOptionBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.status_ = status_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.payload_ = payload_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.message_ = message_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.handshakeSecret_ = handshakeSecretBuilder_ == null
              ? handshakeSecret_
              : handshakeSecretBuilder_.build();
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.accessTokenError_ = accessTokenError_;
          to_bitField0_ |= 0x00000010;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.getDefaultInstance()) return this;
        if (other.hasStatus()) {
          setStatus(other.getStatus());
        }
        if (other.hasPayload()) {
          setPayload(other.getPayload());
        }
        if (other.hasMessage()) {
          message_ = other.message_;
          bitField0_ |= 0x00000004;
          onChanged();
        }
        if (cipherOptionBuilder_ == null) {
          if (!other.cipherOption_.isEmpty()) {
            if (cipherOption_.isEmpty()) {
              cipherOption_ = other.cipherOption_;
              bitField0_ = (bitField0_ & ~0x00000008);
            } else {
              ensureCipherOptionIsMutable();
              cipherOption_.addAll(other.cipherOption_);
            }
            onChanged();
          }
        } else {
          if (!other.cipherOption_.isEmpty()) {
            if (cipherOptionBuilder_.isEmpty()) {
              cipherOptionBuilder_.dispose();
              cipherOptionBuilder_ = null;
              cipherOption_ = other.cipherOption_;
              bitField0_ = (bitField0_ & ~0x00000008);
              cipherOptionBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getCipherOptionFieldBuilder() : null;
            } else {
              cipherOptionBuilder_.addAllMessages(other.cipherOption_);
            }
          }
        }
        if (other.hasHandshakeSecret()) {
          mergeHandshakeSecret(other.getHandshakeSecret());
        }
        if (other.hasAccessTokenError()) {
          setAccessTokenError(other.getAccessTokenError());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasStatus()) {
          return false;
        }
        for (int i = 0; i < getCipherOptionCount(); i++) {
          if (!getCipherOption(i).isInitialized()) {
            return false;
          }
        }
        if (hasHandshakeSecret()) {
          if (!getHandshakeSecret().isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(1, tmpRaw);
                } else {
                  status_ = tmpRaw;
                  bitField0_ |= 0x00000001;
                }
                break;
              } // case 8
              case 18: {
                payload_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 26: {
                message_ = input.readBytes();
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              case 34: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.PARSER,
                        extensionRegistry);
                if (cipherOptionBuilder_ == null) {
                  ensureCipherOptionIsMutable();
                  cipherOption_.add(m);
                } else {
                  cipherOptionBuilder_.addMessage(m);
                }
                break;
              } // case 34
              case 42: {
                input.readMessage(
                    getHandshakeSecretFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000010;
                break;
              } // case 42
              case 48: {
                accessTokenError_ = input.readBool();
                bitField0_ |= 0x00000020;
                break;
              } // case 48
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int status_ = 0;
      /**
       * <code>required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;</code>
       * @return Whether the status field is set.
       */
      @java.lang.Override public boolean hasStatus() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;</code>
       * @return The status.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus getStatus() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus.forNumber(status_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus.SUCCESS : result;
      }
      /**
       * <code>required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;</code>
       * @param value The status to set.
       * @return This builder for chaining.
       */
      public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        status_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus status = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearStatus() {
        bitField0_ = (bitField0_ & ~0x00000001);
        status_ = 0;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString payload_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>optional bytes payload = 2;</code>
       * @return Whether the payload field is set.
       */
      @java.lang.Override
      public boolean hasPayload() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional bytes payload = 2;</code>
       * @return The payload.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getPayload() {
        return payload_;
      }
      /**
       * <code>optional bytes payload = 2;</code>
       * @param value The payload to set.
       * @return This builder for chaining.
       */
      public Builder setPayload(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        payload_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional bytes payload = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearPayload() {
        bitField0_ = (bitField0_ & ~0x00000002);
        payload_ = getDefaultInstance().getPayload();
        onChanged();
        return this;
      }

      private java.lang.Object message_ = "";
      /**
       * <code>optional string message = 3;</code>
       * @return Whether the message field is set.
       */
      public boolean hasMessage() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional string message = 3;</code>
       * @return The message.
       */
      public java.lang.String getMessage() {
        java.lang.Object ref = message_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            message_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string message = 3;</code>
       * @return The bytes for message.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getMessageBytes() {
        java.lang.Object ref = message_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          message_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string message = 3;</code>
       * @param value The message to set.
       * @return This builder for chaining.
       */
      public Builder setMessage(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        message_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional string message = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearMessage() {
        message_ = getDefaultInstance().getMessage();
        bitField0_ = (bitField0_ & ~0x00000004);
        onChanged();
        return this;
      }
      /**
       * <code>optional string message = 3;</code>
       * @param value The bytes for message to set.
       * @return This builder for chaining.
       */
      public Builder setMessageBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        message_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto> cipherOption_ =
        java.util.Collections.emptyList();
      private void ensureCipherOptionIsMutable() {
        if (!((bitField0_ & 0x00000008) != 0)) {
          cipherOption_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto>(cipherOption_);
          bitField0_ |= 0x00000008;
         }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder> cipherOptionBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto> getCipherOptionList() {
        if (cipherOptionBuilder_ == null) {
          return java.util.Collections.unmodifiableList(cipherOption_);
        } else {
          return cipherOptionBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
       */
      public int getCipherOptionCount() {
        if (cipherOptionBuilder_ == null) {
          return cipherOption_.size();
        } else {
          return cipherOptionBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto getCipherOption(int index) {
        if (cipherOptionBuilder_ == null) {
          return cipherOption_.get(index);
        } else {
          return cipherOptionBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
       */
      public Builder setCipherOption(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto value) {
        if (cipherOptionBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureCipherOptionIsMutable();
          cipherOption_.set(index, value);
          onChanged();
        } else {
          cipherOptionBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
       */
      public Builder setCipherOption(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder builderForValue) {
        if (cipherOptionBuilder_ == null) {
          ensureCipherOptionIsMutable();
          cipherOption_.set(index, builderForValue.build());
          onChanged();
        } else {
          cipherOptionBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
       */
      public Builder addCipherOption(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto value) {
        if (cipherOptionBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureCipherOptionIsMutable();
          cipherOption_.add(value);
          onChanged();
        } else {
          cipherOptionBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
       */
      public Builder addCipherOption(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto value) {
        if (cipherOptionBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureCipherOptionIsMutable();
          cipherOption_.add(index, value);
          onChanged();
        } else {
          cipherOptionBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
       */
      public Builder addCipherOption(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder builderForValue) {
        if (cipherOptionBuilder_ == null) {
          ensureCipherOptionIsMutable();
          cipherOption_.add(builderForValue.build());
          onChanged();
        } else {
          cipherOptionBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
       */
      public Builder addCipherOption(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder builderForValue) {
        if (cipherOptionBuilder_ == null) {
          ensureCipherOptionIsMutable();
          cipherOption_.add(index, builderForValue.build());
          onChanged();
        } else {
          cipherOptionBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
       */
      public Builder addAllCipherOption(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto> values) {
        if (cipherOptionBuilder_ == null) {
          ensureCipherOptionIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, cipherOption_);
          onChanged();
        } else {
          cipherOptionBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
       */
      public Builder clearCipherOption() {
        if (cipherOptionBuilder_ == null) {
          cipherOption_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000008);
          onChanged();
        } else {
          cipherOptionBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
       */
      public Builder removeCipherOption(int index) {
        if (cipherOptionBuilder_ == null) {
          ensureCipherOptionIsMutable();
          cipherOption_.remove(index);
          onChanged();
        } else {
          cipherOptionBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder getCipherOptionBuilder(
          int index) {
        return getCipherOptionFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder getCipherOptionOrBuilder(
          int index) {
        if (cipherOptionBuilder_ == null) {
          return cipherOption_.get(index);
        } else {
          return cipherOptionBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder> 
           getCipherOptionOrBuilderList() {
        if (cipherOptionBuilder_ != null) {
          return cipherOptionBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(cipherOption_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder addCipherOptionBuilder() {
        return getCipherOptionFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder addCipherOptionBuilder(
          int index) {
        return getCipherOptionFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.CipherOptionProto cipherOption = 4;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder> 
           getCipherOptionBuilderList() {
        return getCipherOptionFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder> 
          getCipherOptionFieldBuilder() {
        if (cipherOptionBuilder_ == null) {
          cipherOptionBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProtoOrBuilder>(
                  cipherOption_,
                  ((bitField0_ & 0x00000008) != 0),
                  getParentForChildren(),
                  isClean());
          cipherOption_ = null;
        }
        return cipherOptionBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto handshakeSecret_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProtoOrBuilder> handshakeSecretBuilder_;
      /**
       * <code>optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;</code>
       * @return Whether the handshakeSecret field is set.
       */
      public boolean hasHandshakeSecret() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;</code>
       * @return The handshakeSecret.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto getHandshakeSecret() {
        if (handshakeSecretBuilder_ == null) {
          return handshakeSecret_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.getDefaultInstance() : handshakeSecret_;
        } else {
          return handshakeSecretBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;</code>
       */
      public Builder setHandshakeSecret(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto value) {
        if (handshakeSecretBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          handshakeSecret_ = value;
        } else {
          handshakeSecretBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;</code>
       */
      public Builder setHandshakeSecret(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.Builder builderForValue) {
        if (handshakeSecretBuilder_ == null) {
          handshakeSecret_ = builderForValue.build();
        } else {
          handshakeSecretBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;</code>
       */
      public Builder mergeHandshakeSecret(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto value) {
        if (handshakeSecretBuilder_ == null) {
          if (((bitField0_ & 0x00000010) != 0) &&
            handshakeSecret_ != null &&
            handshakeSecret_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.getDefaultInstance()) {
            getHandshakeSecretBuilder().mergeFrom(value);
          } else {
            handshakeSecret_ = value;
          }
        } else {
          handshakeSecretBuilder_.mergeFrom(value);
        }
        if (handshakeSecret_ != null) {
          bitField0_ |= 0x00000010;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;</code>
       */
      public Builder clearHandshakeSecret() {
        bitField0_ = (bitField0_ & ~0x00000010);
        handshakeSecret_ = null;
        if (handshakeSecretBuilder_ != null) {
          handshakeSecretBuilder_.dispose();
          handshakeSecretBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.Builder getHandshakeSecretBuilder() {
        bitField0_ |= 0x00000010;
        onChanged();
        return getHandshakeSecretFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProtoOrBuilder getHandshakeSecretOrBuilder() {
        if (handshakeSecretBuilder_ != null) {
          return handshakeSecretBuilder_.getMessageOrBuilder();
        } else {
          return handshakeSecret_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.getDefaultInstance() : handshakeSecret_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.HandshakeSecretProto handshakeSecret = 5;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProtoOrBuilder> 
          getHandshakeSecretFieldBuilder() {
        if (handshakeSecretBuilder_ == null) {
          handshakeSecretBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProtoOrBuilder>(
                  getHandshakeSecret(),
                  getParentForChildren(),
                  isClean());
          handshakeSecret_ = null;
        }
        return handshakeSecretBuilder_;
      }

      private boolean accessTokenError_ ;
      /**
       * <code>optional bool accessTokenError = 6;</code>
       * @return Whether the accessTokenError field is set.
       */
      @java.lang.Override
      public boolean hasAccessTokenError() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>optional bool accessTokenError = 6;</code>
       * @return The accessTokenError.
       */
      @java.lang.Override
      public boolean getAccessTokenError() {
        return accessTokenError_;
      }
      /**
       * <code>optional bool accessTokenError = 6;</code>
       * @param value The accessTokenError to set.
       * @return This builder for chaining.
       */
      public Builder setAccessTokenError(boolean value) {
        accessTokenError_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <code>optional bool accessTokenError = 6;</code>
       * @return This builder for chaining.
       */
      public Builder clearAccessTokenError() {
        bitField0_ = (bitField0_ & ~0x00000020);
        accessTokenError_ = false;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DataTransferEncryptorMessageProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.DataTransferEncryptorMessageProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DataTransferEncryptorMessageProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DataTransferEncryptorMessageProto>() {
      @java.lang.Override
      public DataTransferEncryptorMessageProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<DataTransferEncryptorMessageProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<DataTransferEncryptorMessageProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
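
  // Illustrative usage sketch (hand-written, not part of the protoc output; the
  // cipherOption and handshakeSecret locals and the message text are assumed
  // placeholders). It exercises only builder and parser methods declared above:
  //
  //   DataTransferEncryptorMessageProto msg = DataTransferEncryptorMessageProto.newBuilder()
  //       .setMessage("negotiation message")        // optional string, field 3
  //       .addCipherOption(cipherOption)            // repeated message, field 4
  //       .setHandshakeSecret(handshakeSecret)      // optional message, field 5
  //       .setAccessTokenError(false)               // optional bool, field 6
  //       .build();
  //   byte[] wire = msg.toByteArray();
  //   DataTransferEncryptorMessageProto parsed =
  //       DataTransferEncryptorMessageProto.parser().parseFrom(wire);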

  public interface HandshakeSecretProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.HandshakeSecretProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required bytes secret = 1;</code>
     * @return Whether the secret field is set.
     */
    boolean hasSecret();
    /**
     * <code>required bytes secret = 1;</code>
     * @return The secret.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getSecret();

    /**
     * <code>required string bpid = 2;</code>
     * @return Whether the bpid field is set.
     */
    boolean hasBpid();
    /**
     * <code>required string bpid = 2;</code>
     * @return The bpid.
     */
    java.lang.String getBpid();
    /**
     * <code>required string bpid = 2;</code>
     * @return The bytes for bpid.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getBpidBytes();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.HandshakeSecretProto}
   */
  public static final class HandshakeSecretProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.HandshakeSecretProto)
      HandshakeSecretProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use HandshakeSecretProto.newBuilder() to construct.
    private HandshakeSecretProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private HandshakeSecretProto() {
      secret_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      bpid_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new HandshakeSecretProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_HandshakeSecretProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_HandshakeSecretProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.Builder.class);
    }

    private int bitField0_;
    public static final int SECRET_FIELD_NUMBER = 1;
    private org.apache.hadoop.thirdparty.protobuf.ByteString secret_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>required bytes secret = 1;</code>
     * @return Whether the secret field is set.
     */
    @java.lang.Override
    public boolean hasSecret() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required bytes secret = 1;</code>
     * @return The secret.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getSecret() {
      return secret_;
    }

    public static final int BPID_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object bpid_ = "";
    /**
     * <code>required string bpid = 2;</code>
     * @return Whether the bpid field is set.
     */
    @java.lang.Override
    public boolean hasBpid() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required string bpid = 2;</code>
     * @return The bpid.
     */
    @java.lang.Override
    public java.lang.String getBpid() {
      java.lang.Object ref = bpid_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          bpid_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string bpid = 2;</code>
     * @return The bytes for bpid.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getBpidBytes() {
      java.lang.Object ref = bpid_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        bpid_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasSecret()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasBpid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeBytes(1, secret_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, bpid_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(1, secret_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, bpid_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto) obj;

      if (hasSecret() != other.hasSecret()) return false;
      if (hasSecret()) {
        if (!getSecret()
            .equals(other.getSecret())) return false;
      }
      if (hasBpid() != other.hasBpid()) return false;
      if (hasBpid()) {
        if (!getBpid()
            .equals(other.getBpid())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasSecret()) {
        hash = (37 * hash) + SECRET_FIELD_NUMBER;
        hash = (53 * hash) + getSecret().hashCode();
      }
      if (hasBpid()) {
        hash = (37 * hash) + BPID_FIELD_NUMBER;
        hash = (53 * hash) + getBpid().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.HandshakeSecretProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.HandshakeSecretProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_HandshakeSecretProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_HandshakeSecretProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.newBuilder()
      private Builder() {
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        secret_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        bpid_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_HandshakeSecretProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.secret_ = secret_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.bpid_ = bpid_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto.getDefaultInstance()) return this;
        if (other.hasSecret()) {
          setSecret(other.getSecret());
        }
        if (other.hasBpid()) {
          bpid_ = other.bpid_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasSecret()) {
          return false;
        }
        if (!hasBpid()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                secret_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                bpid_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.thirdparty.protobuf.ByteString secret_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>required bytes secret = 1;</code>
       * @return Whether the secret field is set.
       */
      @java.lang.Override
      public boolean hasSecret() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required bytes secret = 1;</code>
       * @return The secret.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getSecret() {
        return secret_;
      }
      /**
       * <code>required bytes secret = 1;</code>
       * @param value The secret to set.
       * @return This builder for chaining.
       */
      public Builder setSecret(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        secret_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required bytes secret = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearSecret() {
        bitField0_ = (bitField0_ & ~0x00000001);
        secret_ = getDefaultInstance().getSecret();
        onChanged();
        return this;
      }

      private java.lang.Object bpid_ = "";
      /**
       * <code>required string bpid = 2;</code>
       * @return Whether the bpid field is set.
       */
      public boolean hasBpid() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required string bpid = 2;</code>
       * @return The bpid.
       */
      public java.lang.String getBpid() {
        java.lang.Object ref = bpid_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            bpid_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string bpid = 2;</code>
       * @return The bytes for bpid.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getBpidBytes() {
        java.lang.Object ref = bpid_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          bpid_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string bpid = 2;</code>
       * @param value The bpid to set.
       * @return This builder for chaining.
       */
      public Builder setBpid(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        bpid_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required string bpid = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearBpid() {
        bpid_ = getDefaultInstance().getBpid();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <code>required string bpid = 2;</code>
       * @param value The bytes for bpid to set.
       * @return This builder for chaining.
       */
      public Builder setBpidBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        bpid_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.HandshakeSecretProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.HandshakeSecretProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<HandshakeSecretProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<HandshakeSecretProto>() {
      @java.lang.Override
      public HandshakeSecretProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<HandshakeSecretProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<HandshakeSecretProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
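
  // Illustrative usage sketch (hand-written, not protoc output; the secret bytes
  // and block-pool id literals are placeholders). Both fields are required, so
  // build() throws an uninitialized-message exception if either setter is skipped:
  //
  //   HandshakeSecretProto hs = HandshakeSecretProto.newBuilder()
  //       .setSecret(org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8("secret"))
  //       .setBpid("BP-1-placeholder")
  //       .build();
  //   HandshakeSecretProto roundTripped = HandshakeSecretProto.parseFrom(hs.toByteArray());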

  public interface BaseHeaderProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.BaseHeaderProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
     * @return Whether the block field is set.
     */
    boolean hasBlock();
    /**
     * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
     * @return The block.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock();
    /**
     * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder();

    /**
     * <code>optional .hadoop.common.TokenProto token = 2;</code>
     * @return Whether the token field is set.
     */
    boolean hasToken();
    /**
     * <code>optional .hadoop.common.TokenProto token = 2;</code>
     * @return The token.
     */
    org.apache.hadoop.security.proto.SecurityProtos.TokenProto getToken();
    /**
     * <code>optional .hadoop.common.TokenProto token = 2;</code>
     */
    org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getTokenOrBuilder();

    /**
     * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;</code>
     * @return Whether the traceInfo field is set.
     */
    boolean hasTraceInfo();
    /**
     * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;</code>
     * @return The traceInfo.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo();
    /**
     * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder();
  }
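
  // Illustrative read pattern for the accessors declared above (hand-written
  // sketch; `header` stands for any BaseHeaderProtoOrBuilder instance). Optional
  // fields are guarded with the generated has*() methods before reading:
  //
  //   org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block = header.getBlock();
  //   if (header.hasToken()) {
  //     org.apache.hadoop.security.proto.SecurityProtos.TokenProto token = header.getToken();
  //   }
  //   if (header.hasTraceInfo()) {
  //     org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto traceInfo = header.getTraceInfo();
  //   }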
  /**
   * Protobuf type {@code hadoop.hdfs.BaseHeaderProto}
   */
  public static final class BaseHeaderProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.BaseHeaderProto)
      BaseHeaderProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use BaseHeaderProto.newBuilder() to construct.
    private BaseHeaderProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private BaseHeaderProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new BaseHeaderProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BaseHeaderProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BaseHeaderProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder.class);
    }

    private int bitField0_;
    public static final int BLOCK_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
    /**
     * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
     * @return Whether the block field is set.
     */
    @java.lang.Override
    public boolean hasBlock() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
     * @return The block.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
      return block_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
    }
    /**
     * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
      return block_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
    }

    public static final int TOKEN_FIELD_NUMBER = 2;
    private org.apache.hadoop.security.proto.SecurityProtos.TokenProto token_;
    /**
     * <code>optional .hadoop.common.TokenProto token = 2;</code>
     * @return Whether the token field is set.
     */
    @java.lang.Override
    public boolean hasToken() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional .hadoop.common.TokenProto token = 2;</code>
     * @return The token.
     */
    @java.lang.Override
    public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getToken() {
      return token_ == null ? org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : token_;
    }
    /**
     * <code>optional .hadoop.common.TokenProto token = 2;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getTokenOrBuilder() {
      return token_ == null ? org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : token_;
    }

    public static final int TRACEINFO_FIELD_NUMBER = 3;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto traceInfo_;
    /**
     * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;</code>
     * @return Whether the traceInfo field is set.
     */
    @java.lang.Override
    public boolean hasTraceInfo() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;</code>
     * @return The traceInfo.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo() {
      return traceInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
    }
    /**
     * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder() {
      return traceInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasBlock()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getBlock().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (hasToken()) {
        if (!getToken().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getBlock());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeMessage(2, getToken());
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeMessage(3, getTraceInfo());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getBlock());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(2, getToken());
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(3, getTraceInfo());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto) obj;

      if (hasBlock() != other.hasBlock()) return false;
      if (hasBlock()) {
        if (!getBlock()
            .equals(other.getBlock())) return false;
      }
      if (hasToken() != other.hasToken()) return false;
      if (hasToken()) {
        if (!getToken()
            .equals(other.getToken())) return false;
      }
      if (hasTraceInfo() != other.hasTraceInfo()) return false;
      if (hasTraceInfo()) {
        if (!getTraceInfo()
            .equals(other.getTraceInfo())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasBlock()) {
        hash = (37 * hash) + BLOCK_FIELD_NUMBER;
        hash = (53 * hash) + getBlock().hashCode();
      }
      if (hasToken()) {
        hash = (37 * hash) + TOKEN_FIELD_NUMBER;
        hash = (53 * hash) + getToken().hashCode();
      }
      if (hasTraceInfo()) {
        hash = (37 * hash) + TRACEINFO_FIELD_NUMBER;
        hash = (53 * hash) + getTraceInfo().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.BaseHeaderProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.BaseHeaderProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BaseHeaderProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BaseHeaderProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getBlockFieldBuilder();
          getTokenFieldBuilder();
          getTraceInfoFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        block_ = null;
        if (blockBuilder_ != null) {
          blockBuilder_.dispose();
          blockBuilder_ = null;
        }
        token_ = null;
        if (tokenBuilder_ != null) {
          tokenBuilder_.dispose();
          tokenBuilder_ = null;
        }
        traceInfo_ = null;
        if (traceInfoBuilder_ != null) {
          traceInfoBuilder_.dispose();
          traceInfoBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BaseHeaderProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.block_ = blockBuilder_ == null
              ? block_
              : blockBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.token_ = tokenBuilder_ == null
              ? token_
              : tokenBuilder_.build();
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.traceInfo_ = traceInfoBuilder_ == null
              ? traceInfo_
              : traceInfoBuilder_.build();
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) return this;
        if (other.hasBlock()) {
          mergeBlock(other.getBlock());
        }
        if (other.hasToken()) {
          mergeToken(other.getToken());
        }
        if (other.hasTraceInfo()) {
          mergeTraceInfo(other.getTraceInfo());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasBlock()) {
          return false;
        }
        if (!getBlock().isInitialized()) {
          return false;
        }
        if (hasToken()) {
          if (!getToken().isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getBlockFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                input.readMessage(
                    getTokenFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 26: {
                input.readMessage(
                    getTraceInfoFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto block_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> blockBuilder_;
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
       * @return Whether the block field is set.
       */
      public boolean hasBlock() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
       * @return The block.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto getBlock() {
        if (blockBuilder_ == null) {
          return block_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
        } else {
          return blockBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
       */
      public Builder setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
        if (blockBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          block_ = value;
        } else {
          blockBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
       */
      public Builder setBlock(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder builderForValue) {
        if (blockBuilder_ == null) {
          block_ = builderForValue.build();
        } else {
          blockBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
       */
      public Builder mergeBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto value) {
        if (blockBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            block_ != null &&
            block_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance()) {
            getBlockBuilder().mergeFrom(value);
          } else {
            block_ = value;
          }
        } else {
          blockBuilder_.mergeFrom(value);
        }
        if (block_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
       */
      public Builder clearBlock() {
        bitField0_ = (bitField0_ & ~0x00000001);
        block_ = null;
        if (blockBuilder_ != null) {
          blockBuilder_.dispose();
          blockBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder getBlockBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getBlockFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder getBlockOrBuilder() {
        if (blockBuilder_ != null) {
          return blockBuilder_.getMessageOrBuilder();
        } else {
          return block_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.getDefaultInstance() : block_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.ExtendedBlockProto block = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder> 
          getBlockFieldBuilder() {
        if (blockBuilder_ == null) {
          blockBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProtoOrBuilder>(
                  getBlock(),
                  getParentForChildren(),
                  isClean());
          block_ = null;
        }
        return blockBuilder_;
      }

      private org.apache.hadoop.security.proto.SecurityProtos.TokenProto token_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> tokenBuilder_;
      /**
       * <code>optional .hadoop.common.TokenProto token = 2;</code>
       * @return Whether the token field is set.
       */
      public boolean hasToken() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional .hadoop.common.TokenProto token = 2;</code>
       * @return The token.
       */
      public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getToken() {
        if (tokenBuilder_ == null) {
          return token_ == null ? org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : token_;
        } else {
          return tokenBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.common.TokenProto token = 2;</code>
       */
      public Builder setToken(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
        if (tokenBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          token_ = value;
        } else {
          tokenBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.common.TokenProto token = 2;</code>
       */
      public Builder setToken(
          org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
        if (tokenBuilder_ == null) {
          token_ = builderForValue.build();
        } else {
          tokenBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.common.TokenProto token = 2;</code>
       */
      public Builder mergeToken(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
        if (tokenBuilder_ == null) {
          if (((bitField0_ & 0x00000002) != 0) &&
            token_ != null &&
            token_ != org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance()) {
            getTokenBuilder().mergeFrom(value);
          } else {
            token_ = value;
          }
        } else {
          tokenBuilder_.mergeFrom(value);
        }
        if (token_ != null) {
          bitField0_ |= 0x00000002;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.common.TokenProto token = 2;</code>
       */
      public Builder clearToken() {
        bitField0_ = (bitField0_ & ~0x00000002);
        token_ = null;
        if (tokenBuilder_ != null) {
          tokenBuilder_.dispose();
          tokenBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.common.TokenProto token = 2;</code>
       */
      public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder getTokenBuilder() {
        bitField0_ |= 0x00000002;
        onChanged();
        return getTokenFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.common.TokenProto token = 2;</code>
       */
      public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getTokenOrBuilder() {
        if (tokenBuilder_ != null) {
          return tokenBuilder_.getMessageOrBuilder();
        } else {
          return token_ == null ?
              org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance() : token_;
        }
      }
      /**
       * <code>optional .hadoop.common.TokenProto token = 2;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> 
          getTokenFieldBuilder() {
        if (tokenBuilder_ == null) {
          tokenBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>(
                  getToken(),
                  getParentForChildren(),
                  isClean());
          token_ = null;
        }
        return tokenBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto traceInfo_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder> traceInfoBuilder_;
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;</code>
       * @return Whether the traceInfo field is set.
       */
      public boolean hasTraceInfo() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;</code>
       * @return The traceInfo.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo() {
        if (traceInfoBuilder_ == null) {
          return traceInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
        } else {
          return traceInfoBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;</code>
       */
      public Builder setTraceInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto value) {
        if (traceInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          traceInfo_ = value;
        } else {
          traceInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;</code>
       */
      public Builder setTraceInfo(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder builderForValue) {
        if (traceInfoBuilder_ == null) {
          traceInfo_ = builderForValue.build();
        } else {
          traceInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;</code>
       */
      public Builder mergeTraceInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto value) {
        if (traceInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000004) != 0) &&
            traceInfo_ != null &&
            traceInfo_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance()) {
            getTraceInfoBuilder().mergeFrom(value);
          } else {
            traceInfo_ = value;
          }
        } else {
          traceInfoBuilder_.mergeFrom(value);
        }
        if (traceInfo_ != null) {
          bitField0_ |= 0x00000004;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;</code>
       */
      public Builder clearTraceInfo() {
        bitField0_ = (bitField0_ & ~0x00000004);
        traceInfo_ = null;
        if (traceInfoBuilder_ != null) {
          traceInfoBuilder_.dispose();
          traceInfoBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder getTraceInfoBuilder() {
        bitField0_ |= 0x00000004;
        onChanged();
        return getTraceInfoFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder() {
        if (traceInfoBuilder_ != null) {
          return traceInfoBuilder_.getMessageOrBuilder();
        } else {
          return traceInfo_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 3;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder> 
          getTraceInfoFieldBuilder() {
        if (traceInfoBuilder_ == null) {
          traceInfoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder>(
                  getTraceInfo(),
                  getParentForChildren(),
                  isClean());
          traceInfo_ = null;
        }
        return traceInfoBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BaseHeaderProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.BaseHeaderProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<BaseHeaderProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<BaseHeaderProto>() {
      @java.lang.Override
      public BaseHeaderProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<BaseHeaderProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<BaseHeaderProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
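
  // Illustrative sketch: one possible way to assemble and round-trip a BaseHeaderProto with the
  // generated Builder and parse helpers above. The ExtendedBlockProto field values are
  // hypothetical placeholders; only the 'block' field is required, 'token' and 'traceInfo'
  // may be left unset.
  //
  //   DataTransferProtos.BaseHeaderProto header =
  //       DataTransferProtos.BaseHeaderProto.newBuilder()
  //           .setBlock(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto.newBuilder()
  //               .setPoolId("BP-0000000000-127.0.0.1-0")  // hypothetical block pool id
  //               .setBlockId(1L)
  //               .setGenerationStamp(1L))
  //           .build();                                    // build() throws if required 'block' is unset
  //   byte[] bytes = header.toByteArray();
  //   DataTransferProtos.BaseHeaderProto parsed =
  //       DataTransferProtos.BaseHeaderProto.parseFrom(bytes);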

  public interface DataTransferTraceInfoProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DataTransferTraceInfoProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>optional uint64 traceId = 1;</code>
     * @return Whether the traceId field is set.
     */
    boolean hasTraceId();
    /**
     * <code>optional uint64 traceId = 1;</code>
     * @return The traceId.
     */
    long getTraceId();

    /**
     * <code>optional uint64 parentId = 2;</code>
     * @return Whether the parentId field is set.
     */
    boolean hasParentId();
    /**
     * <code>optional uint64 parentId = 2;</code>
     * @return The parentId.
     */
    long getParentId();

    /**
     * <code>optional bytes spanContext = 3;</code>
     * @return Whether the spanContext field is set.
     */
    boolean hasSpanContext();
    /**
     * <code>optional bytes spanContext = 3;</code>
     * @return The spanContext.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getSpanContext();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.DataTransferTraceInfoProto}
   */
  public static final class DataTransferTraceInfoProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.DataTransferTraceInfoProto)
      DataTransferTraceInfoProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use DataTransferTraceInfoProto.newBuilder() to construct.
    private DataTransferTraceInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private DataTransferTraceInfoProto() {
      spanContext_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new DataTransferTraceInfoProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferTraceInfoProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferTraceInfoProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder.class);
    }

    private int bitField0_;
    public static final int TRACEID_FIELD_NUMBER = 1;
    private long traceId_ = 0L;
    /**
     * <code>optional uint64 traceId = 1;</code>
     * @return Whether the traceId field is set.
     */
    @java.lang.Override
    public boolean hasTraceId() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional uint64 traceId = 1;</code>
     * @return The traceId.
     */
    @java.lang.Override
    public long getTraceId() {
      return traceId_;
    }

    public static final int PARENTID_FIELD_NUMBER = 2;
    private long parentId_ = 0L;
    /**
     * <code>optional uint64 parentId = 2;</code>
     * @return Whether the parentId field is set.
     */
    @java.lang.Override
    public boolean hasParentId() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional uint64 parentId = 2;</code>
     * @return The parentId.
     */
    @java.lang.Override
    public long getParentId() {
      return parentId_;
    }

    public static final int SPANCONTEXT_FIELD_NUMBER = 3;
    private org.apache.hadoop.thirdparty.protobuf.ByteString spanContext_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>optional bytes spanContext = 3;</code>
     * @return Whether the spanContext field is set.
     */
    @java.lang.Override
    public boolean hasSpanContext() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional bytes spanContext = 3;</code>
     * @return The spanContext.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getSpanContext() {
      return spanContext_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt64(1, traceId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(2, parentId_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeBytes(3, spanContext_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(1, traceId_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(2, parentId_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(3, spanContext_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto) obj;

      if (hasTraceId() != other.hasTraceId()) return false;
      if (hasTraceId()) {
        if (getTraceId()
            != other.getTraceId()) return false;
      }
      if (hasParentId() != other.hasParentId()) return false;
      if (hasParentId()) {
        if (getParentId()
            != other.getParentId()) return false;
      }
      if (hasSpanContext() != other.hasSpanContext()) return false;
      if (hasSpanContext()) {
        if (!getSpanContext()
            .equals(other.getSpanContext())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasTraceId()) {
        hash = (37 * hash) + TRACEID_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getTraceId());
      }
      if (hasParentId()) {
        hash = (37 * hash) + PARENTID_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getParentId());
      }
      if (hasSpanContext()) {
        hash = (37 * hash) + SPANCONTEXT_FIELD_NUMBER;
        hash = (53 * hash) + getSpanContext().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.DataTransferTraceInfoProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DataTransferTraceInfoProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferTraceInfoProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferTraceInfoProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        traceId_ = 0L;
        parentId_ = 0L;
        spanContext_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DataTransferTraceInfoProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.traceId_ = traceId_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.parentId_ = parentId_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.spanContext_ = spanContext_;
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance()) return this;
        if (other.hasTraceId()) {
          setTraceId(other.getTraceId());
        }
        if (other.hasParentId()) {
          setParentId(other.getParentId());
        }
        if (other.hasSpanContext()) {
          setSpanContext(other.getSpanContext());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                traceId_ = input.readUInt64();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 16: {
                parentId_ = input.readUInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 26: {
                spanContext_ = input.readBytes();
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private long traceId_;
      /**
       * <code>optional uint64 traceId = 1;</code>
       * @return Whether the traceId field is set.
       */
      @java.lang.Override
      public boolean hasTraceId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional uint64 traceId = 1;</code>
       * @return The traceId.
       */
      @java.lang.Override
      public long getTraceId() {
        return traceId_;
      }
      /**
       * <code>optional uint64 traceId = 1;</code>
       * @param value The traceId to set.
       * @return This builder for chaining.
       */
      public Builder setTraceId(long value) {

        traceId_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 traceId = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearTraceId() {
        bitField0_ = (bitField0_ & ~0x00000001);
        traceId_ = 0L;
        onChanged();
        return this;
      }

      private long parentId_;
      /**
       * <code>optional uint64 parentId = 2;</code>
       * @return Whether the parentId field is set.
       */
      @java.lang.Override
      public boolean hasParentId() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional uint64 parentId = 2;</code>
       * @return The parentId.
       */
      @java.lang.Override
      public long getParentId() {
        return parentId_;
      }
      /**
       * <code>optional uint64 parentId = 2;</code>
       * @param value The parentId to set.
       * @return This builder for chaining.
       */
      public Builder setParentId(long value) {

        parentId_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 parentId = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearParentId() {
        bitField0_ = (bitField0_ & ~0x00000002);
        parentId_ = 0L;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString spanContext_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>optional bytes spanContext = 3;</code>
       * @return Whether the spanContext field is set.
       */
      @java.lang.Override
      public boolean hasSpanContext() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional bytes spanContext = 3;</code>
       * @return The spanContext.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getSpanContext() {
        return spanContext_;
      }
      /**
       * <code>optional bytes spanContext = 3;</code>
       * @param value The spanContext to set.
       * @return This builder for chaining.
       */
      public Builder setSpanContext(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        spanContext_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional bytes spanContext = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearSpanContext() {
        bitField0_ = (bitField0_ & ~0x00000004);
        spanContext_ = getDefaultInstance().getSpanContext();
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DataTransferTraceInfoProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.DataTransferTraceInfoProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DataTransferTraceInfoProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DataTransferTraceInfoProto>() {
      @java.lang.Override
      public DataTransferTraceInfoProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<DataTransferTraceInfoProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<DataTransferTraceInfoProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
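
  /**
   * Usage sketch (not generated by protoc): one way a caller might attach
   * tracing information with {@code DataTransferTraceInfoProto}. The parent
   * span id and the serialized span context are assumed to come from the
   * caller's tracing library; the names below are illustrative only.
   */
  private static DataTransferTraceInfoProto exampleTraceInfo(
      long parentSpanId, byte[] serializedSpanContext) {
    return DataTransferTraceInfoProto.newBuilder()
        .setParentId(parentSpanId)                 // optional uint64 parentId = 2
        .setSpanContext(
            org.apache.hadoop.thirdparty.protobuf.ByteString
                .copyFrom(serializedSpanContext))  // optional bytes spanContext = 3
        .build();
  }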

  public interface ClientOperationHeaderProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ClientOperationHeaderProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;</code>
     * @return Whether the baseHeader field is set.
     */
    boolean hasBaseHeader();
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;</code>
     * @return The baseHeader.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getBaseHeader();
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getBaseHeaderOrBuilder();

    /**
     * <code>required string clientName = 2;</code>
     * @return Whether the clientName field is set.
     */
    boolean hasClientName();
    /**
     * <code>required string clientName = 2;</code>
     * @return The clientName.
     */
    java.lang.String getClientName();
    /**
     * <code>required string clientName = 2;</code>
     * @return The bytes for clientName.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getClientNameBytes();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.ClientOperationHeaderProto}
   */
  public static final class ClientOperationHeaderProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.ClientOperationHeaderProto)
      ClientOperationHeaderProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use ClientOperationHeaderProto.newBuilder() to construct.
    private ClientOperationHeaderProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ClientOperationHeaderProto() {
      clientName_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ClientOperationHeaderProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientOperationHeaderProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientOperationHeaderProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder.class);
    }

    private int bitField0_;
    public static final int BASEHEADER_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto baseHeader_;
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;</code>
     * @return Whether the baseHeader field is set.
     */
    @java.lang.Override
    public boolean hasBaseHeader() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;</code>
     * @return The baseHeader.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getBaseHeader() {
      return baseHeader_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : baseHeader_;
    }
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getBaseHeaderOrBuilder() {
      return baseHeader_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : baseHeader_;
    }

    public static final int CLIENTNAME_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object clientName_ = "";
    /**
     * <code>required string clientName = 2;</code>
     * @return Whether the clientName field is set.
     */
    @java.lang.Override
    public boolean hasClientName() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required string clientName = 2;</code>
     * @return The clientName.
     */
    @java.lang.Override
    public java.lang.String getClientName() {
      java.lang.Object ref = clientName_;
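      // clientName_ holds either a String or a ByteString; the first String
      // access converts the bytes and caches the String when it is valid UTF-8.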
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          clientName_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string clientName = 2;</code>
     * @return The bytes for clientName.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getClientNameBytes() {
      java.lang.Object ref = clientName_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        clientName_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasBaseHeader()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasClientName()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getBaseHeader().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getBaseHeader());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, clientName_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getBaseHeader());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, clientName_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto) obj;

      if (hasBaseHeader() != other.hasBaseHeader()) return false;
      if (hasBaseHeader()) {
        if (!getBaseHeader()
            .equals(other.getBaseHeader())) return false;
      }
      if (hasClientName() != other.hasClientName()) return false;
      if (hasClientName()) {
        if (!getClientName()
            .equals(other.getClientName())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasBaseHeader()) {
        hash = (37 * hash) + BASEHEADER_FIELD_NUMBER;
        hash = (53 * hash) + getBaseHeader().hashCode();
      }
      if (hasClientName()) {
        hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER;
        hash = (53 * hash) + getClientName().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.ClientOperationHeaderProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ClientOperationHeaderProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientOperationHeaderProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientOperationHeaderProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getBaseHeaderFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        baseHeader_ = null;
        if (baseHeaderBuilder_ != null) {
          baseHeaderBuilder_.dispose();
          baseHeaderBuilder_ = null;
        }
        clientName_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientOperationHeaderProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.baseHeader_ = baseHeaderBuilder_ == null
              ? baseHeader_
              : baseHeaderBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.clientName_ = clientName_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) return this;
        if (other.hasBaseHeader()) {
          mergeBaseHeader(other.getBaseHeader());
        }
        if (other.hasClientName()) {
          clientName_ = other.clientName_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasBaseHeader()) {
          return false;
        }
        if (!hasClientName()) {
          return false;
        }
        if (!getBaseHeader().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getBaseHeaderFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                clientName_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
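      // Presence bits for this builder: 0x1 = baseHeader, 0x2 = clientName.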
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto baseHeader_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> baseHeaderBuilder_;
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;</code>
       * @return Whether the baseHeader field is set.
       */
      public boolean hasBaseHeader() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;</code>
       * @return The baseHeader.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getBaseHeader() {
        if (baseHeaderBuilder_ == null) {
          return baseHeader_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : baseHeader_;
        } else {
          return baseHeaderBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;</code>
       */
      public Builder setBaseHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
        if (baseHeaderBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          baseHeader_ = value;
        } else {
          baseHeaderBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;</code>
       */
      public Builder setBaseHeader(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
        if (baseHeaderBuilder_ == null) {
          baseHeader_ = builderForValue.build();
        } else {
          baseHeaderBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;</code>
       */
      public Builder mergeBaseHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
        if (baseHeaderBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            baseHeader_ != null &&
            baseHeader_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
            getBaseHeaderBuilder().mergeFrom(value);
          } else {
            baseHeader_ = value;
          }
        } else {
          baseHeaderBuilder_.mergeFrom(value);
        }
        if (baseHeader_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;</code>
       */
      public Builder clearBaseHeader() {
        bitField0_ = (bitField0_ & ~0x00000001);
        baseHeader_ = null;
        if (baseHeaderBuilder_ != null) {
          baseHeaderBuilder_.dispose();
          baseHeaderBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getBaseHeaderBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getBaseHeaderFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getBaseHeaderOrBuilder() {
        if (baseHeaderBuilder_ != null) {
          return baseHeaderBuilder_.getMessageOrBuilder();
        } else {
          return baseHeader_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : baseHeader_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto baseHeader = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> 
          getBaseHeaderFieldBuilder() {
        if (baseHeaderBuilder_ == null) {
          baseHeaderBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
                  getBaseHeader(),
                  getParentForChildren(),
                  isClean());
          baseHeader_ = null;
        }
        return baseHeaderBuilder_;
      }

      private java.lang.Object clientName_ = "";
      /**
       * <code>required string clientName = 2;</code>
       * @return Whether the clientName field is set.
       */
      public boolean hasClientName() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required string clientName = 2;</code>
       * @return The clientName.
       */
      public java.lang.String getClientName() {
        java.lang.Object ref = clientName_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            clientName_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string clientName = 2;</code>
       * @return The bytes for clientName.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getClientNameBytes() {
        java.lang.Object ref = clientName_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          clientName_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string clientName = 2;</code>
       * @param value The clientName to set.
       * @return This builder for chaining.
       */
      public Builder setClientName(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        clientName_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required string clientName = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearClientName() {
        clientName_ = getDefaultInstance().getClientName();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <code>required string clientName = 2;</code>
       * @param value The bytes for clientName to set.
       * @return This builder for chaining.
       */
      public Builder setClientNameBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        clientName_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ClientOperationHeaderProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ClientOperationHeaderProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ClientOperationHeaderProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ClientOperationHeaderProto>() {
      @java.lang.Override
      public ClientOperationHeaderProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ClientOperationHeaderProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ClientOperationHeaderProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
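
  /**
   * Usage sketch (not generated by protoc): building, serializing and
   * re-parsing a {@code ClientOperationHeaderProto}. Both fields are
   * required, so {@code build()} throws if either is missing. The
   * {@code baseHeader} argument is assumed to be constructed elsewhere; the
   * client name is illustrative.
   */
  private static ClientOperationHeaderProto exampleClientOperationHeader(
      BaseHeaderProto baseHeader)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
        .setBaseHeader(baseHeader)                  // required message field 1
        .setClientName("DFSClient_example")         // required string field 2
        .build();
    // Round-trip through the wire format using the generated parseFrom(byte[]).
    byte[] wire = header.toByteArray();
    return ClientOperationHeaderProto.parseFrom(wire);
  }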

  public interface CachingStrategyProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.CachingStrategyProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>optional bool dropBehind = 1;</code>
     * @return Whether the dropBehind field is set.
     */
    boolean hasDropBehind();
    /**
     * <code>optional bool dropBehind = 1;</code>
     * @return The dropBehind.
     */
    boolean getDropBehind();

    /**
     * <code>optional int64 readahead = 2;</code>
     * @return Whether the readahead field is set.
     */
    boolean hasReadahead();
    /**
     * <code>optional int64 readahead = 2;</code>
     * @return The readahead.
     */
    long getReadahead();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.CachingStrategyProto}
   */
  public static final class CachingStrategyProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.CachingStrategyProto)
      CachingStrategyProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use CachingStrategyProto.newBuilder() to construct.
    private CachingStrategyProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private CachingStrategyProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new CachingStrategyProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_CachingStrategyProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_CachingStrategyProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder.class);
    }

    private int bitField0_;
    public static final int DROPBEHIND_FIELD_NUMBER = 1;
    private boolean dropBehind_ = false;
    /**
     * <code>optional bool dropBehind = 1;</code>
     * @return Whether the dropBehind field is set.
     */
    @java.lang.Override
    public boolean hasDropBehind() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>optional bool dropBehind = 1;</code>
     * @return The dropBehind.
     */
    @java.lang.Override
    public boolean getDropBehind() {
      return dropBehind_;
    }

    public static final int READAHEAD_FIELD_NUMBER = 2;
    private long readahead_ = 0L;
    /**
     * <code>optional int64 readahead = 2;</code>
     * @return Whether the readahead field is set.
     */
    @java.lang.Override
    public boolean hasReadahead() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional int64 readahead = 2;</code>
     * @return The readahead.
     */
    @java.lang.Override
    public long getReadahead() {
      return readahead_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeBool(1, dropBehind_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeInt64(2, readahead_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(1, dropBehind_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt64Size(2, readahead_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto) obj;

      if (hasDropBehind() != other.hasDropBehind()) return false;
      if (hasDropBehind()) {
        if (getDropBehind()
            != other.getDropBehind()) return false;
      }
      if (hasReadahead() != other.hasReadahead()) return false;
      if (hasReadahead()) {
        if (getReadahead()
            != other.getReadahead()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasDropBehind()) {
        hash = (37 * hash) + DROPBEHIND_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getDropBehind());
      }
      if (hasReadahead()) {
        hash = (37 * hash) + READAHEAD_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getReadahead());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.CachingStrategyProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.CachingStrategyProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_CachingStrategyProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_CachingStrategyProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        dropBehind_ = false;
        readahead_ = 0L;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_CachingStrategyProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.dropBehind_ = dropBehind_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.readahead_ = readahead_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance()) return this;
        if (other.hasDropBehind()) {
          setDropBehind(other.getDropBehind());
        }
        if (other.hasReadahead()) {
          setReadahead(other.getReadahead());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                dropBehind_ = input.readBool();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 16: {
                readahead_ = input.readInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private boolean dropBehind_ ;
      /**
       * <code>optional bool dropBehind = 1;</code>
       * @return Whether the dropBehind field is set.
       */
      @java.lang.Override
      public boolean hasDropBehind() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>optional bool dropBehind = 1;</code>
       * @return The dropBehind.
       */
      @java.lang.Override
      public boolean getDropBehind() {
        return dropBehind_;
      }
      /**
       * <code>optional bool dropBehind = 1;</code>
       * @param value The dropBehind to set.
       * @return This builder for chaining.
       */
      public Builder setDropBehind(boolean value) {

        dropBehind_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>optional bool dropBehind = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearDropBehind() {
        bitField0_ = (bitField0_ & ~0x00000001);
        dropBehind_ = false;
        onChanged();
        return this;
      }

      private long readahead_ ;
      /**
       * <code>optional int64 readahead = 2;</code>
       * @return Whether the readahead field is set.
       */
      @java.lang.Override
      public boolean hasReadahead() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional int64 readahead = 2;</code>
       * @return The readahead.
       */
      @java.lang.Override
      public long getReadahead() {
        return readahead_;
      }
      /**
       * <code>optional int64 readahead = 2;</code>
       * @param value The readahead to set.
       * @return This builder for chaining.
       */
      public Builder setReadahead(long value) {

        readahead_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional int64 readahead = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearReadahead() {
        bitField0_ = (bitField0_ & ~0x00000002);
        readahead_ = 0L;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.CachingStrategyProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.CachingStrategyProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<CachingStrategyProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<CachingStrategyProto>() {
      @java.lang.Override
      public CachingStrategyProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<CachingStrategyProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<CachingStrategyProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
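  /*
   * Illustrative usage sketch (an assumption, not part of the generated
   * output): CachingStrategyProto carries the optional dropBehind and
   * readahead hints for a data transfer. Both fields are optional, so the
   * builder may be used with either, both, or neither set; the 4 MB
   * readahead value below is an arbitrary example, not a default taken
   * from this file.
   *
   *   CachingStrategyProto strategy = CachingStrategyProto.newBuilder()
   *       .setDropBehind(true)             // hint: drop data from the OS cache after use
   *       .setReadahead(4L * 1024 * 1024)  // hint: request ~4 MB of readahead
   *       .build();
   *   boolean hinted = strategy.hasReadahead(); // true once the field has been set
   */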

  public interface OpReadBlockProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpReadBlockProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
     * @return Whether the header field is set.
     */
    boolean hasHeader();
    /**
     * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
     * @return The header.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader();
    /**
     * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder();

    /**
     * <code>required uint64 offset = 2;</code>
     * @return Whether the offset field is set.
     */
    boolean hasOffset();
    /**
     * <code>required uint64 offset = 2;</code>
     * @return The offset.
     */
    long getOffset();

    /**
     * <code>required uint64 len = 3;</code>
     * @return Whether the len field is set.
     */
    boolean hasLen();
    /**
     * <code>required uint64 len = 3;</code>
     * @return The len.
     */
    long getLen();

    /**
     * <code>optional bool sendChecksums = 4 [default = true];</code>
     * @return Whether the sendChecksums field is set.
     */
    boolean hasSendChecksums();
    /**
     * <code>optional bool sendChecksums = 4 [default = true];</code>
     * @return The sendChecksums.
     */
    boolean getSendChecksums();

    /**
     * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;</code>
     * @return Whether the cachingStrategy field is set.
     */
    boolean hasCachingStrategy();
    /**
     * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;</code>
     * @return The cachingStrategy.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getCachingStrategy();
    /**
     * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder getCachingStrategyOrBuilder();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.OpReadBlockProto}
   */
  public static final class OpReadBlockProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.OpReadBlockProto)
      OpReadBlockProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use OpReadBlockProto.newBuilder() to construct.
    private OpReadBlockProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private OpReadBlockProto() {
      sendChecksums_ = true;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new OpReadBlockProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReadBlockProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReadBlockProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.Builder.class);
    }

    private int bitField0_;
    public static final int HEADER_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
    /**
     * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
     * @return Whether the header field is set.
     */
    @java.lang.Override
    public boolean hasHeader() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
     * @return The header.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
      return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
    }
    /**
     * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
      return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
    }

    public static final int OFFSET_FIELD_NUMBER = 2;
    private long offset_ = 0L;
    /**
     * <code>required uint64 offset = 2;</code>
     * @return Whether the offset field is set.
     */
    @java.lang.Override
    public boolean hasOffset() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required uint64 offset = 2;</code>
     * @return The offset.
     */
    @java.lang.Override
    public long getOffset() {
      return offset_;
    }

    public static final int LEN_FIELD_NUMBER = 3;
    private long len_ = 0L;
    /**
     * <code>required uint64 len = 3;</code>
     * @return Whether the len field is set.
     */
    @java.lang.Override
    public boolean hasLen() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required uint64 len = 3;</code>
     * @return The len.
     */
    @java.lang.Override
    public long getLen() {
      return len_;
    }

    public static final int SENDCHECKSUMS_FIELD_NUMBER = 4;
    private boolean sendChecksums_ = true;
    /**
     * <code>optional bool sendChecksums = 4 [default = true];</code>
     * @return Whether the sendChecksums field is set.
     */
    @java.lang.Override
    public boolean hasSendChecksums() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>optional bool sendChecksums = 4 [default = true];</code>
     * @return The sendChecksums.
     */
    @java.lang.Override
    public boolean getSendChecksums() {
      return sendChecksums_;
    }

    public static final int CACHINGSTRATEGY_FIELD_NUMBER = 5;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto cachingStrategy_;
    /**
     * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;</code>
     * @return Whether the cachingStrategy field is set.
     */
    @java.lang.Override
    public boolean hasCachingStrategy() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;</code>
     * @return The cachingStrategy.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getCachingStrategy() {
      return cachingStrategy_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance() : cachingStrategy_;
    }
    /**
     * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder getCachingStrategyOrBuilder() {
      return cachingStrategy_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance() : cachingStrategy_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasHeader()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasOffset()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasLen()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getHeader().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getHeader());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(2, offset_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeUInt64(3, len_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeBool(4, sendChecksums_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeMessage(5, getCachingStrategy());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getHeader());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(2, offset_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(3, len_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(4, sendChecksums_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(5, getCachingStrategy());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto) obj;

      if (hasHeader() != other.hasHeader()) return false;
      if (hasHeader()) {
        if (!getHeader()
            .equals(other.getHeader())) return false;
      }
      if (hasOffset() != other.hasOffset()) return false;
      if (hasOffset()) {
        if (getOffset()
            != other.getOffset()) return false;
      }
      if (hasLen() != other.hasLen()) return false;
      if (hasLen()) {
        if (getLen()
            != other.getLen()) return false;
      }
      if (hasSendChecksums() != other.hasSendChecksums()) return false;
      if (hasSendChecksums()) {
        if (getSendChecksums()
            != other.getSendChecksums()) return false;
      }
      if (hasCachingStrategy() != other.hasCachingStrategy()) return false;
      if (hasCachingStrategy()) {
        if (!getCachingStrategy()
            .equals(other.getCachingStrategy())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasHeader()) {
        hash = (37 * hash) + HEADER_FIELD_NUMBER;
        hash = (53 * hash) + getHeader().hashCode();
      }
      if (hasOffset()) {
        hash = (37 * hash) + OFFSET_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getOffset());
      }
      if (hasLen()) {
        hash = (37 * hash) + LEN_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getLen());
      }
      if (hasSendChecksums()) {
        hash = (37 * hash) + SENDCHECKSUMS_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getSendChecksums());
      }
      if (hasCachingStrategy()) {
        hash = (37 * hash) + CACHINGSTRATEGY_FIELD_NUMBER;
        hash = (53 * hash) + getCachingStrategy().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.OpReadBlockProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpReadBlockProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReadBlockProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReadBlockProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getHeaderFieldBuilder();
          getCachingStrategyFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        header_ = null;
        if (headerBuilder_ != null) {
          headerBuilder_.dispose();
          headerBuilder_ = null;
        }
        offset_ = 0L;
        len_ = 0L;
        sendChecksums_ = true;
        cachingStrategy_ = null;
        if (cachingStrategyBuilder_ != null) {
          cachingStrategyBuilder_.dispose();
          cachingStrategyBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReadBlockProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.header_ = headerBuilder_ == null
              ? header_
              : headerBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.offset_ = offset_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.len_ = len_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.sendChecksums_ = sendChecksums_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.cachingStrategy_ = cachingStrategyBuilder_ == null
              ? cachingStrategy_
              : cachingStrategyBuilder_.build();
          to_bitField0_ |= 0x00000010;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto.getDefaultInstance()) return this;
        if (other.hasHeader()) {
          mergeHeader(other.getHeader());
        }
        if (other.hasOffset()) {
          setOffset(other.getOffset());
        }
        if (other.hasLen()) {
          setLen(other.getLen());
        }
        if (other.hasSendChecksums()) {
          setSendChecksums(other.getSendChecksums());
        }
        if (other.hasCachingStrategy()) {
          mergeCachingStrategy(other.getCachingStrategy());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasHeader()) {
          return false;
        }
        if (!hasOffset()) {
          return false;
        }
        if (!hasLen()) {
          return false;
        }
        if (!getHeader().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getHeaderFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                offset_ = input.readUInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 24: {
                len_ = input.readUInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 32: {
                sendChecksums_ = input.readBool();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
              case 42: {
                input.readMessage(
                    getCachingStrategyFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000010;
                break;
              } // case 42
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> headerBuilder_;
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       * @return Whether the header field is set.
       */
      public boolean hasHeader() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       * @return The header.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
        if (headerBuilder_ == null) {
          return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
        } else {
          return headerBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       */
      public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
        if (headerBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          header_ = value;
        } else {
          headerBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       */
      public Builder setHeader(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder builderForValue) {
        if (headerBuilder_ == null) {
          header_ = builderForValue.build();
        } else {
          headerBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       */
      public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
        if (headerBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            header_ != null &&
            header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) {
            getHeaderBuilder().mergeFrom(value);
          } else {
            header_ = value;
          }
        } else {
          headerBuilder_.mergeFrom(value);
        }
        if (header_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       */
      public Builder clearHeader() {
        bitField0_ = (bitField0_ & ~0x00000001);
        header_ = null;
        if (headerBuilder_ != null) {
          headerBuilder_.dispose();
          headerBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder getHeaderBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getHeaderFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
        if (headerBuilder_ != null) {
          return headerBuilder_.getMessageOrBuilder();
        } else {
          return header_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> 
          getHeaderFieldBuilder() {
        if (headerBuilder_ == null) {
          headerBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>(
                  getHeader(),
                  getParentForChildren(),
                  isClean());
          header_ = null;
        }
        return headerBuilder_;
      }

      private long offset_ ;
      /**
       * <code>required uint64 offset = 2;</code>
       * @return Whether the offset field is set.
       */
      @java.lang.Override
      public boolean hasOffset() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required uint64 offset = 2;</code>
       * @return The offset.
       */
      @java.lang.Override
      public long getOffset() {
        return offset_;
      }
      /**
       * <code>required uint64 offset = 2;</code>
       * @param value The offset to set.
       * @return This builder for chaining.
       */
      public Builder setOffset(long value) {

        offset_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 offset = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearOffset() {
        bitField0_ = (bitField0_ & ~0x00000002);
        offset_ = 0L;
        onChanged();
        return this;
      }

      private long len_ ;
      /**
       * <code>required uint64 len = 3;</code>
       * @return Whether the len field is set.
       */
      @java.lang.Override
      public boolean hasLen() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required uint64 len = 3;</code>
       * @return The len.
       */
      @java.lang.Override
      public long getLen() {
        return len_;
      }
      /**
       * <code>required uint64 len = 3;</code>
       * @param value The len to set.
       * @return This builder for chaining.
       */
      public Builder setLen(long value) {

        len_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 len = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearLen() {
        bitField0_ = (bitField0_ & ~0x00000004);
        len_ = 0L;
        onChanged();
        return this;
      }

      private boolean sendChecksums_ = true;
      /**
       * <code>optional bool sendChecksums = 4 [default = true];</code>
       * @return Whether the sendChecksums field is set.
       */
      @java.lang.Override
      public boolean hasSendChecksums() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional bool sendChecksums = 4 [default = true];</code>
       * @return The sendChecksums.
       */
      @java.lang.Override
      public boolean getSendChecksums() {
        return sendChecksums_;
      }
      /**
       * <code>optional bool sendChecksums = 4 [default = true];</code>
       * @param value The sendChecksums to set.
       * @return This builder for chaining.
       */
      public Builder setSendChecksums(boolean value) {

        sendChecksums_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional bool sendChecksums = 4 [default = true];</code>
       * @return This builder for chaining.
       */
      public Builder clearSendChecksums() {
        bitField0_ = (bitField0_ & ~0x00000008);
        sendChecksums_ = true;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto cachingStrategy_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder> cachingStrategyBuilder_;
      /**
       * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;</code>
       * @return Whether the cachingStrategy field is set.
       */
      public boolean hasCachingStrategy() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;</code>
       * @return The cachingStrategy.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getCachingStrategy() {
        if (cachingStrategyBuilder_ == null) {
          return cachingStrategy_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance() : cachingStrategy_;
        } else {
          return cachingStrategyBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;</code>
       */
      public Builder setCachingStrategy(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto value) {
        if (cachingStrategyBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          cachingStrategy_ = value;
        } else {
          cachingStrategyBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;</code>
       */
      public Builder setCachingStrategy(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder builderForValue) {
        if (cachingStrategyBuilder_ == null) {
          cachingStrategy_ = builderForValue.build();
        } else {
          cachingStrategyBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;</code>
       */
      public Builder mergeCachingStrategy(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto value) {
        if (cachingStrategyBuilder_ == null) {
          if (((bitField0_ & 0x00000010) != 0) &&
            cachingStrategy_ != null &&
            cachingStrategy_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance()) {
            getCachingStrategyBuilder().mergeFrom(value);
          } else {
            cachingStrategy_ = value;
          }
        } else {
          cachingStrategyBuilder_.mergeFrom(value);
        }
        if (cachingStrategy_ != null) {
          bitField0_ |= 0x00000010;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;</code>
       */
      public Builder clearCachingStrategy() {
        bitField0_ = (bitField0_ & ~0x00000010);
        cachingStrategy_ = null;
        if (cachingStrategyBuilder_ != null) {
          cachingStrategyBuilder_.dispose();
          cachingStrategyBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder getCachingStrategyBuilder() {
        bitField0_ |= 0x00000010;
        onChanged();
        return getCachingStrategyFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder getCachingStrategyOrBuilder() {
        if (cachingStrategyBuilder_ != null) {
          return cachingStrategyBuilder_.getMessageOrBuilder();
        } else {
          return cachingStrategy_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance() : cachingStrategy_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 5;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder> 
          getCachingStrategyFieldBuilder() {
        if (cachingStrategyBuilder_ == null) {
          cachingStrategyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder>(
                  getCachingStrategy(),
                  getParentForChildren(),
                  isClean());
          cachingStrategy_ = null;
        }
        return cachingStrategyBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpReadBlockProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.OpReadBlockProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpReadBlockProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpReadBlockProto>() {
      @java.lang.Override
      public OpReadBlockProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<OpReadBlockProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<OpReadBlockProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
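  /*
   * Illustrative usage sketch (an assumption, not part of the generated
   * output): OpReadBlockProto is the message a client sends to a datanode to
   * request a range of a block. header, offset and len are required fields;
   * sendChecksums defaults to true and cachingStrategy is optional. The
   * ClientOperationHeaderProto value below is assumed to have been built
   * elsewhere, and the 64 MB length is only an example.
   *
   *   OpReadBlockProto request = OpReadBlockProto.newBuilder()
   *       .setHeader(header)            // required ClientOperationHeaderProto
   *       .setOffset(0L)                // start of the requested range
   *       .setLen(64L * 1024 * 1024)    // length of the requested range
   *       .setSendChecksums(true)       // matches the proto default
   *       .build();                     // build() throws if a required field is unset
   *
   *   // Round-trip through the generated parse methods:
   *   byte[] bytes = request.toByteArray();
   *   OpReadBlockProto parsed = OpReadBlockProto.parseFrom(bytes);
   */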

  public interface ChecksumProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ChecksumProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.ChecksumTypeProto type = 1;</code>
     * @return Whether the type field is set.
     */
    boolean hasType();
    /**
     * <code>required .hadoop.hdfs.ChecksumTypeProto type = 1;</code>
     * @return The type.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getType();

    /**
     * <code>required uint32 bytesPerChecksum = 2;</code>
     * @return Whether the bytesPerChecksum field is set.
     */
    boolean hasBytesPerChecksum();
    /**
     * <code>required uint32 bytesPerChecksum = 2;</code>
     * @return The bytesPerChecksum.
     */
    int getBytesPerChecksum();
  }
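  /*
   * Illustrative usage sketch (an assumption, not part of the generated
   * output): ChecksumProto pairs a checksum algorithm with the chunk size it
   * covers. CHECKSUM_CRC32C is assumed to be one of the
   * HdfsProtos.ChecksumTypeProto enum values, and the builder's
   * setType(ChecksumTypeProto) setter is assumed from the standard protoc
   * pattern; 512 is an arbitrary bytesPerChecksum for the example.
   *
   *   ChecksumProto checksum = ChecksumProto.newBuilder()
   *       .setType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C)
   *       .setBytesPerChecksum(512)   // bytes of data covered by each checksum
   *       .build();
   */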
  /**
   * Protobuf type {@code hadoop.hdfs.ChecksumProto}
   */
  public static final class ChecksumProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.ChecksumProto)
      ChecksumProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use ChecksumProto.newBuilder() to construct.
    private ChecksumProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ChecksumProto() {
      type_ = 0;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ChecksumProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ChecksumProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ChecksumProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder.class);
    }

    private int bitField0_;
    public static final int TYPE_FIELD_NUMBER = 1;
    private int type_ = 0;
    /**
     * <code>required .hadoop.hdfs.ChecksumTypeProto type = 1;</code>
     * @return Whether the type field is set.
     */
    @java.lang.Override public boolean hasType() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.ChecksumTypeProto type = 1;</code>
     * @return The type.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getType() {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.forNumber(type_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL : result;
    }

    public static final int BYTESPERCHECKSUM_FIELD_NUMBER = 2;
    private int bytesPerChecksum_ = 0;
    /**
     * <code>required uint32 bytesPerChecksum = 2;</code>
     * @return Whether the bytesPerChecksum field is set.
     */
    @java.lang.Override
    public boolean hasBytesPerChecksum() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required uint32 bytesPerChecksum = 2;</code>
     * @return The bytesPerChecksum.
     */
    @java.lang.Override
    public int getBytesPerChecksum() {
      return bytesPerChecksum_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasType()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasBytesPerChecksum()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeEnum(1, type_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt32(2, bytesPerChecksum_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(1, type_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(2, bytesPerChecksum_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto) obj;

      if (hasType() != other.hasType()) return false;
      if (hasType()) {
        if (type_ != other.type_) return false;
      }
      if (hasBytesPerChecksum() != other.hasBytesPerChecksum()) return false;
      if (hasBytesPerChecksum()) {
        if (getBytesPerChecksum()
            != other.getBytesPerChecksum()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasType()) {
        hash = (37 * hash) + TYPE_FIELD_NUMBER;
        hash = (53 * hash) + type_;
      }
      if (hasBytesPerChecksum()) {
        hash = (37 * hash) + BYTESPERCHECKSUM_FIELD_NUMBER;
        hash = (53 * hash) + getBytesPerChecksum();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.ChecksumProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ChecksumProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ChecksumProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ChecksumProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        type_ = 0;
        bytesPerChecksum_ = 0;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ChecksumProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.type_ = type_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.bytesPerChecksum_ = bytesPerChecksum_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance()) return this;
        if (other.hasType()) {
          setType(other.getType());
        }
        if (other.hasBytesPerChecksum()) {
          setBytesPerChecksum(other.getBytesPerChecksum());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasType()) {
          return false;
        }
        if (!hasBytesPerChecksum()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
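        // Wire tags encode (field_number << 3) | wire_type, so the varint enum in
        // field 1 arrives as tag 8 and the varint uint32 in field 2 as tag 16,
        // matching the cases handled below.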
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(1, tmpRaw);
                } else {
                  type_ = tmpRaw;
                  bitField0_ |= 0x00000001;
                }
                break;
              } // case 8
              case 16: {
                bytesPerChecksum_ = input.readUInt32();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int type_ = 0;
      /**
       * <code>required .hadoop.hdfs.ChecksumTypeProto type = 1;</code>
       * @return Whether the type field is set.
       */
      @java.lang.Override public boolean hasType() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.ChecksumTypeProto type = 1;</code>
       * @return The type.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getType() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.forNumber(type_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL : result;
      }
      /**
       * <code>required .hadoop.hdfs.ChecksumTypeProto type = 1;</code>
       * @param value The type to set.
       * @return This builder for chaining.
       */
      public Builder setType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        type_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ChecksumTypeProto type = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearType() {
        bitField0_ = (bitField0_ & ~0x00000001);
        type_ = 0;
        onChanged();
        return this;
      }

      private int bytesPerChecksum_ ;
      /**
       * <code>required uint32 bytesPerChecksum = 2;</code>
       * @return Whether the bytesPerChecksum field is set.
       */
      @java.lang.Override
      public boolean hasBytesPerChecksum() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required uint32 bytesPerChecksum = 2;</code>
       * @return The bytesPerChecksum.
       */
      @java.lang.Override
      public int getBytesPerChecksum() {
        return bytesPerChecksum_;
      }
      /**
       * <code>required uint32 bytesPerChecksum = 2;</code>
       * @param value The bytesPerChecksum to set.
       * @return This builder for chaining.
       */
      public Builder setBytesPerChecksum(int value) {
        bytesPerChecksum_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 bytesPerChecksum = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearBytesPerChecksum() {
        bitField0_ = (bitField0_ & ~0x00000002);
        bytesPerChecksum_ = 0;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ChecksumProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ChecksumProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ChecksumProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ChecksumProto>() {
      @java.lang.Override
      public ChecksumProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };
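    // Note, not generated output: PARSER is deprecated in favor of the parser()
    // accessor below and the static parseFrom(...) overloads defined earlier; it is
    // likely retained only for compatibility with existing callers.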

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ChecksumProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ChecksumProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
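
  // Usage sketch, not generated output: a minimal build/serialize/parse round trip
  // for ChecksumProto using only the generated API above. CHECKSUM_CRC32C is assumed
  // to be defined in HdfsProtos.ChecksumTypeProto; substitute any available type.
  private static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto
      exampleChecksumRoundTrip()
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto checksum =
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.newBuilder()
            .setType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C)
            .setBytesPerChecksum(512) // both required fields must be set before build()
            .build();
    byte[] bytes = checksum.toByteArray();
    return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.parseFrom(bytes);
  }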

  public interface OpWriteBlockProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpWriteBlockProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
     * @return Whether the header field is set.
     */
    boolean hasHeader();
    /**
     * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
     * @return The header.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader();
    /**
     * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder();

    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> 
        getTargetsList();
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index);
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
     */
    int getTargetsCount();
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> 
        getTargetsOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
        int index);

    /**
     * <code>optional .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
     * @return Whether the source field is set.
     */
    boolean hasSource();
    /**
     * <code>optional .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
     * @return The source.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource();
    /**
     * <code>optional .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder();

    /**
     * <code>required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;</code>
     * @return Whether the stage field is set.
     */
    boolean hasStage();
    /**
     * <code>required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;</code>
     * @return The stage.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage();

    /**
     * <code>required uint32 pipelineSize = 5;</code>
     * @return Whether the pipelineSize field is set.
     */
    boolean hasPipelineSize();
    /**
     * <code>required uint32 pipelineSize = 5;</code>
     * @return The pipelineSize.
     */
    int getPipelineSize();

    /**
     * <code>required uint64 minBytesRcvd = 6;</code>
     * @return Whether the minBytesRcvd field is set.
     */
    boolean hasMinBytesRcvd();
    /**
     * <code>required uint64 minBytesRcvd = 6;</code>
     * @return The minBytesRcvd.
     */
    long getMinBytesRcvd();

    /**
     * <code>required uint64 maxBytesRcvd = 7;</code>
     * @return Whether the maxBytesRcvd field is set.
     */
    boolean hasMaxBytesRcvd();
    /**
     * <code>required uint64 maxBytesRcvd = 7;</code>
     * @return The maxBytesRcvd.
     */
    long getMaxBytesRcvd();

    /**
     * <code>required uint64 latestGenerationStamp = 8;</code>
     * @return Whether the latestGenerationStamp field is set.
     */
    boolean hasLatestGenerationStamp();
    /**
     * <code>required uint64 latestGenerationStamp = 8;</code>
     * @return The latestGenerationStamp.
     */
    long getLatestGenerationStamp();

    /**
     * <pre>
     **
     * The requested checksum mechanism for this block write.
     * </pre>
     *
     * <code>required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;</code>
     * @return Whether the requestedChecksum field is set.
     */
    boolean hasRequestedChecksum();
    /**
     * <pre>
     **
     * The requested checksum mechanism for this block write.
     * </pre>
     *
     * <code>required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;</code>
     * @return The requestedChecksum.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getRequestedChecksum();
    /**
     * <pre>
     **
     * The requested checksum mechanism for this block write.
     * </pre>
     *
     * <code>required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getRequestedChecksumOrBuilder();

    /**
     * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;</code>
     * @return Whether the cachingStrategy field is set.
     */
    boolean hasCachingStrategy();
    /**
     * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;</code>
     * @return The cachingStrategy.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getCachingStrategy();
    /**
     * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder getCachingStrategyOrBuilder();

    /**
     * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];</code>
     * @return Whether the storageType field is set.
     */
    boolean hasStorageType();
    /**
     * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];</code>
     * @return The storageType.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType();

    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;</code>
     * @return A list containing the targetStorageTypes.
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getTargetStorageTypesList();
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;</code>
     * @return The count of targetStorageTypes.
     */
    int getTargetStorageTypesCount();
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;</code>
     * @param index The index of the element to return.
     * @return The targetStorageTypes at the given index.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getTargetStorageTypes(int index);

    /**
     * <pre>
     **
     * Hint to the DataNode that the block can be allocated on transient
     * storage i.e. memory and written to disk lazily. The DataNode is free
     * to ignore this hint.
     * </pre>
     *
     * <code>optional bool allowLazyPersist = 13 [default = false];</code>
     * @return Whether the allowLazyPersist field is set.
     */
    boolean hasAllowLazyPersist();
    /**
     * <pre>
     **
     * Hint to the DataNode that the block can be allocated on transient
     * storage i.e. memory and written to disk lazily. The DataNode is free
     * to ignore this hint.
     * </pre>
     *
     * <code>optional bool allowLazyPersist = 13 [default = false];</code>
     * @return The allowLazyPersist.
     */
    boolean getAllowLazyPersist();

    /**
     * <pre>
     *whether to pin the block, so Balancer won't move it.
     * </pre>
     *
     * <code>optional bool pinning = 14 [default = false];</code>
     * @return Whether the pinning field is set.
     */
    boolean hasPinning();
    /**
     * <pre>
     *whether to pin the block, so Balancer won't move it.
     * </pre>
     *
     * <code>optional bool pinning = 14 [default = false];</code>
     * @return The pinning.
     */
    boolean getPinning();

    /**
     * <code>repeated bool targetPinnings = 15;</code>
     * @return A list containing the targetPinnings.
     */
    java.util.List<java.lang.Boolean> getTargetPinningsList();
    /**
     * <code>repeated bool targetPinnings = 15;</code>
     * @return The count of targetPinnings.
     */
    int getTargetPinningsCount();
    /**
     * <code>repeated bool targetPinnings = 15;</code>
     * @param index The index of the element to return.
     * @return The targetPinnings at the given index.
     */
    boolean getTargetPinnings(int index);

    /**
     * <code>optional string storageId = 16;</code>
     * @return Whether the storageId field is set.
     */
    boolean hasStorageId();
    /**
     * <code>optional string storageId = 16;</code>
     * @return The storageId.
     */
    java.lang.String getStorageId();
    /**
     * <code>optional string storageId = 16;</code>
     * @return The bytes for storageId.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getStorageIdBytes();

    /**
     * <code>repeated string targetStorageIds = 17;</code>
     * @return A list containing the targetStorageIds.
     */
    java.util.List<java.lang.String>
        getTargetStorageIdsList();
    /**
     * <code>repeated string targetStorageIds = 17;</code>
     * @return The count of targetStorageIds.
     */
    int getTargetStorageIdsCount();
    /**
     * <code>repeated string targetStorageIds = 17;</code>
     * @param index The index of the element to return.
     * @return The targetStorageIds at the given index.
     */
    java.lang.String getTargetStorageIds(int index);
    /**
     * <code>repeated string targetStorageIds = 17;</code>
     * @param index The index of the value to return.
     * @return The bytes of the targetStorageIds at the given index.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getTargetStorageIdsBytes(int index);
  }
  /**
   * Protobuf type {@code hadoop.hdfs.OpWriteBlockProto}
   */
  public static final class OpWriteBlockProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.OpWriteBlockProto)
      OpWriteBlockProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use OpWriteBlockProto.newBuilder() to construct.
    private OpWriteBlockProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private OpWriteBlockProto() {
      targets_ = java.util.Collections.emptyList();
      stage_ = 0;
      storageType_ = 1;
      targetStorageTypes_ = java.util.Collections.emptyList();
      targetPinnings_ = emptyBooleanList();
      storageId_ = "";
      targetStorageIds_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new OpWriteBlockProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpWriteBlockProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.Builder.class);
    }

    /**
     * Protobuf enum {@code hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage}
     */
    public enum BlockConstructionStage
        implements org.apache.hadoop.thirdparty.protobuf.ProtocolMessageEnum {
      /**
       * <code>PIPELINE_SETUP_APPEND = 0;</code>
       */
      PIPELINE_SETUP_APPEND(0),
      /**
       * <pre>
       * pipeline set up for failed PIPELINE_SETUP_APPEND recovery
       * </pre>
       *
       * <code>PIPELINE_SETUP_APPEND_RECOVERY = 1;</code>
       */
      PIPELINE_SETUP_APPEND_RECOVERY(1),
      /**
       * <pre>
       * data streaming
       * </pre>
       *
       * <code>DATA_STREAMING = 2;</code>
       */
      DATA_STREAMING(2),
      /**
       * <pre>
       * pipeline setup for failed data streaming recovery
       * </pre>
       *
       * <code>PIPELINE_SETUP_STREAMING_RECOVERY = 3;</code>
       */
      PIPELINE_SETUP_STREAMING_RECOVERY(3),
      /**
       * <pre>
       * close the block and pipeline
       * </pre>
       *
       * <code>PIPELINE_CLOSE = 4;</code>
       */
      PIPELINE_CLOSE(4),
      /**
       * <pre>
       * Recover a failed PIPELINE_CLOSE
       * </pre>
       *
       * <code>PIPELINE_CLOSE_RECOVERY = 5;</code>
       */
      PIPELINE_CLOSE_RECOVERY(5),
      /**
       * <pre>
       * pipeline set up for block creation
       * </pre>
       *
       * <code>PIPELINE_SETUP_CREATE = 6;</code>
       */
      PIPELINE_SETUP_CREATE(6),
      /**
       * <pre>
       * transfer RBW for adding datanodes
       * </pre>
       *
       * <code>TRANSFER_RBW = 7;</code>
       */
      TRANSFER_RBW(7),
      /**
       * <pre>
       * transfer Finalized for adding datanodes
       * </pre>
       *
       * <code>TRANSFER_FINALIZED = 8;</code>
       */
      TRANSFER_FINALIZED(8),
      ;

      /**
       * <code>PIPELINE_SETUP_APPEND = 0;</code>
       */
      public static final int PIPELINE_SETUP_APPEND_VALUE = 0;
      /**
       * <pre>
       * pipeline set up for failed PIPELINE_SETUP_APPEND recovery
       * </pre>
       *
       * <code>PIPELINE_SETUP_APPEND_RECOVERY = 1;</code>
       */
      public static final int PIPELINE_SETUP_APPEND_RECOVERY_VALUE = 1;
      /**
       * <pre>
       * data streaming
       * </pre>
       *
       * <code>DATA_STREAMING = 2;</code>
       */
      public static final int DATA_STREAMING_VALUE = 2;
      /**
       * <pre>
       * pipeline setup for failed data streaming recovery
       * </pre>
       *
       * <code>PIPELINE_SETUP_STREAMING_RECOVERY = 3;</code>
       */
      public static final int PIPELINE_SETUP_STREAMING_RECOVERY_VALUE = 3;
      /**
       * <pre>
       * close the block and pipeline
       * </pre>
       *
       * <code>PIPELINE_CLOSE = 4;</code>
       */
      public static final int PIPELINE_CLOSE_VALUE = 4;
      /**
       * <pre>
       * Recover a failed PIPELINE_CLOSE
       * </pre>
       *
       * <code>PIPELINE_CLOSE_RECOVERY = 5;</code>
       */
      public static final int PIPELINE_CLOSE_RECOVERY_VALUE = 5;
      /**
       * <pre>
       * pipeline set up for block creation
       * </pre>
       *
       * <code>PIPELINE_SETUP_CREATE = 6;</code>
       */
      public static final int PIPELINE_SETUP_CREATE_VALUE = 6;
      /**
       * <pre>
       * transfer RBW for adding datanodes
       * </pre>
       *
       * <code>TRANSFER_RBW = 7;</code>
       */
      public static final int TRANSFER_RBW_VALUE = 7;
      /**
       * <pre>
       * transfer Finalized for adding datanodes
       * </pre>
       *
       * <code>TRANSFER_FINALIZED = 8;</code>
       */
      public static final int TRANSFER_FINALIZED_VALUE = 8;


      public final int getNumber() {
        return value;
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       * @deprecated Use {@link #forNumber(int)} instead.
       */
      @java.lang.Deprecated
      public static BlockConstructionStage valueOf(int value) {
        return forNumber(value);
      }

      /**
       * @param value The numeric wire value of the corresponding enum entry.
       * @return The enum associated with the given numeric wire value.
       */
      public static BlockConstructionStage forNumber(int value) {
        switch (value) {
          case 0: return PIPELINE_SETUP_APPEND;
          case 1: return PIPELINE_SETUP_APPEND_RECOVERY;
          case 2: return DATA_STREAMING;
          case 3: return PIPELINE_SETUP_STREAMING_RECOVERY;
          case 4: return PIPELINE_CLOSE;
          case 5: return PIPELINE_CLOSE_RECOVERY;
          case 6: return PIPELINE_SETUP_CREATE;
          case 7: return TRANSFER_RBW;
          case 8: return TRANSFER_FINALIZED;
          default: return null;
        }
      }

      public static org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<BlockConstructionStage>
          internalGetValueMap() {
        return internalValueMap;
      }
      private static final org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<
          BlockConstructionStage> internalValueMap =
            new org.apache.hadoop.thirdparty.protobuf.Internal.EnumLiteMap<BlockConstructionStage>() {
              public BlockConstructionStage findValueByNumber(int number) {
                return BlockConstructionStage.forNumber(number);
              }
            };

      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor
          getValueDescriptor() {
        return getDescriptor().getValues().get(ordinal());
      }
      public final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptorForType() {
        return getDescriptor();
      }
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumDescriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDescriptor().getEnumTypes().get(0);
      }

      private static final BlockConstructionStage[] VALUES = values();

      public static BlockConstructionStage valueOf(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.EnumValueDescriptor desc) {
        if (desc.getType() != getDescriptor()) {
          throw new java.lang.IllegalArgumentException(
            "EnumValueDescriptor is not for this type.");
        }
        return VALUES[desc.getIndex()];
      }

      private final int value;

      private BlockConstructionStage(int value) {
        this.value = value;
      }

      // @@protoc_insertion_point(enum_scope:hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage)
    }
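
    // Usage sketch, not generated output: unknown wire values map to null via
    // forNumber(int), so callers typically fall back to the zero value, mirroring
    // what getStage() below does for an unset or unrecognized stage.
    private static BlockConstructionStage exampleStageFromWire(int wireValue) {
      BlockConstructionStage stage = BlockConstructionStage.forNumber(wireValue);
      return stage == null ? BlockConstructionStage.PIPELINE_SETUP_APPEND : stage;
    }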

    private int bitField0_;
    public static final int HEADER_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
    /**
     * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
     * @return Whether the header field is set.
     */
    @java.lang.Override
    public boolean hasHeader() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
     * @return The header.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
      return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
    }
    /**
     * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
      return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
    }

    public static final int TARGETS_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> targets_;
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getTargetsList() {
      return targets_;
    }
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> 
        getTargetsOrBuilderList() {
      return targets_;
    }
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
     */
    @java.lang.Override
    public int getTargetsCount() {
      return targets_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) {
      return targets_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
        int index) {
      return targets_.get(index);
    }

    public static final int SOURCE_FIELD_NUMBER = 3;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_;
    /**
     * <code>optional .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
     * @return Whether the source field is set.
     */
    @java.lang.Override
    public boolean hasSource() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
     * @return The source.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() {
      return source_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance() : source_;
    }
    /**
     * <code>optional .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() {
      return source_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance() : source_;
    }

    public static final int STAGE_FIELD_NUMBER = 4;
    private int stage_ = 0;
    /**
     * <code>required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;</code>
     * @return Whether the stage field is set.
     */
    @java.lang.Override public boolean hasStage() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;</code>
     * @return The stage.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage() {
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.forNumber(stage_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND : result;
    }

    public static final int PIPELINESIZE_FIELD_NUMBER = 5;
    private int pipelineSize_ = 0;
    /**
     * <code>required uint32 pipelineSize = 5;</code>
     * @return Whether the pipelineSize field is set.
     */
    @java.lang.Override
    public boolean hasPipelineSize() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>required uint32 pipelineSize = 5;</code>
     * @return The pipelineSize.
     */
    @java.lang.Override
    public int getPipelineSize() {
      return pipelineSize_;
    }

    public static final int MINBYTESRCVD_FIELD_NUMBER = 6;
    private long minBytesRcvd_ = 0L;
    /**
     * <code>required uint64 minBytesRcvd = 6;</code>
     * @return Whether the minBytesRcvd field is set.
     */
    @java.lang.Override
    public boolean hasMinBytesRcvd() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>required uint64 minBytesRcvd = 6;</code>
     * @return The minBytesRcvd.
     */
    @java.lang.Override
    public long getMinBytesRcvd() {
      return minBytesRcvd_;
    }

    public static final int MAXBYTESRCVD_FIELD_NUMBER = 7;
    private long maxBytesRcvd_ = 0L;
    /**
     * <code>required uint64 maxBytesRcvd = 7;</code>
     * @return Whether the maxBytesRcvd field is set.
     */
    @java.lang.Override
    public boolean hasMaxBytesRcvd() {
      return ((bitField0_ & 0x00000020) != 0);
    }
    /**
     * <code>required uint64 maxBytesRcvd = 7;</code>
     * @return The maxBytesRcvd.
     */
    @java.lang.Override
    public long getMaxBytesRcvd() {
      return maxBytesRcvd_;
    }

    public static final int LATESTGENERATIONSTAMP_FIELD_NUMBER = 8;
    private long latestGenerationStamp_ = 0L;
    /**
     * <code>required uint64 latestGenerationStamp = 8;</code>
     * @return Whether the latestGenerationStamp field is set.
     */
    @java.lang.Override
    public boolean hasLatestGenerationStamp() {
      return ((bitField0_ & 0x00000040) != 0);
    }
    /**
     * <code>required uint64 latestGenerationStamp = 8;</code>
     * @return The latestGenerationStamp.
     */
    @java.lang.Override
    public long getLatestGenerationStamp() {
      return latestGenerationStamp_;
    }

    public static final int REQUESTEDCHECKSUM_FIELD_NUMBER = 9;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto requestedChecksum_;
    /**
     * <pre>
     **
     * The requested checksum mechanism for this block write.
     * </pre>
     *
     * <code>required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;</code>
     * @return Whether the requestedChecksum field is set.
     */
    @java.lang.Override
    public boolean hasRequestedChecksum() {
      return ((bitField0_ & 0x00000080) != 0);
    }
    /**
     * <pre>
     **
     * The requested checksum mechanism for this block write.
     * </pre>
     *
     * <code>required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;</code>
     * @return The requestedChecksum.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getRequestedChecksum() {
      return requestedChecksum_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance() : requestedChecksum_;
    }
    /**
     * <pre>
     **
     * The requested checksum mechanism for this block write.
     * </pre>
     *
     * <code>required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getRequestedChecksumOrBuilder() {
      return requestedChecksum_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance() : requestedChecksum_;
    }

    public static final int CACHINGSTRATEGY_FIELD_NUMBER = 10;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto cachingStrategy_;
    /**
     * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;</code>
     * @return Whether the cachingStrategy field is set.
     */
    @java.lang.Override
    public boolean hasCachingStrategy() {
      return ((bitField0_ & 0x00000100) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;</code>
     * @return The cachingStrategy.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getCachingStrategy() {
      return cachingStrategy_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance() : cachingStrategy_;
    }
    /**
     * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder getCachingStrategyOrBuilder() {
      return cachingStrategy_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance() : cachingStrategy_;
    }

    public static final int STORAGETYPE_FIELD_NUMBER = 11;
    private int storageType_ = 1;
    /**
     * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];</code>
     * @return Whether the storageType field is set.
     */
    @java.lang.Override public boolean hasStorageType() {
      return ((bitField0_ & 0x00000200) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];</code>
     * @return The storageType.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(storageType_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
    }

    public static final int TARGETSTORAGETYPES_FIELD_NUMBER = 12;
    @SuppressWarnings("serial")
    private java.util.List<java.lang.Integer> targetStorageTypes_;
    private static final org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
        java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> targetStorageTypes_converter_ =
            new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
                java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>() {
              public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto convert(java.lang.Integer from) {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(from);
                return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
              }
            };
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;</code>
     * @return A list containing the targetStorageTypes.
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getTargetStorageTypesList() {
      return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
          java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(targetStorageTypes_, targetStorageTypes_converter_);
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;</code>
     * @return The count of targetStorageTypes.
     */
    @java.lang.Override
    public int getTargetStorageTypesCount() {
      return targetStorageTypes_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;</code>
     * @param index The index of the element to return.
     * @return The targetStorageTypes at the given index.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getTargetStorageTypes(int index) {
      return targetStorageTypes_converter_.convert(targetStorageTypes_.get(index));
    }

    public static final int ALLOWLAZYPERSIST_FIELD_NUMBER = 13;
    private boolean allowLazyPersist_ = false;
    /**
     * <pre>
     **
     * Hint to the DataNode that the block can be allocated on transient
     * storage i.e. memory and written to disk lazily. The DataNode is free
     * to ignore this hint.
     * </pre>
     *
     * <code>optional bool allowLazyPersist = 13 [default = false];</code>
     * @return Whether the allowLazyPersist field is set.
     */
    @java.lang.Override
    public boolean hasAllowLazyPersist() {
      return ((bitField0_ & 0x00000400) != 0);
    }
    /**
     * <pre>
     **
     * Hint to the DataNode that the block can be allocated on transient
     * storage i.e. memory and written to disk lazily. The DataNode is free
     * to ignore this hint.
     * </pre>
     *
     * <code>optional bool allowLazyPersist = 13 [default = false];</code>
     * @return The allowLazyPersist.
     */
    @java.lang.Override
    public boolean getAllowLazyPersist() {
      return allowLazyPersist_;
    }

    public static final int PINNING_FIELD_NUMBER = 14;
    private boolean pinning_ = false;
    /**
     * <pre>
     *whether to pin the block, so Balancer won't move it.
     * </pre>
     *
     * <code>optional bool pinning = 14 [default = false];</code>
     * @return Whether the pinning field is set.
     */
    @java.lang.Override
    public boolean hasPinning() {
      return ((bitField0_ & 0x00000800) != 0);
    }
    /**
     * <pre>
     *whether to pin the block, so Balancer won't move it.
     * </pre>
     *
     * <code>optional bool pinning = 14 [default = false];</code>
     * @return The pinning.
     */
    @java.lang.Override
    public boolean getPinning() {
      return pinning_;
    }

    public static final int TARGETPINNINGS_FIELD_NUMBER = 15;
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.Internal.BooleanList targetPinnings_ =
        emptyBooleanList();
    /**
     * <code>repeated bool targetPinnings = 15;</code>
     * @return A list containing the targetPinnings.
     */
    @java.lang.Override
    public java.util.List<java.lang.Boolean>
        getTargetPinningsList() {
      return targetPinnings_;
    }
    /**
     * <code>repeated bool targetPinnings = 15;</code>
     * @return The count of targetPinnings.
     */
    public int getTargetPinningsCount() {
      return targetPinnings_.size();
    }
    /**
     * <code>repeated bool targetPinnings = 15;</code>
     * @param index The index of the element to return.
     * @return The targetPinnings at the given index.
     */
    public boolean getTargetPinnings(int index) {
      return targetPinnings_.getBoolean(index);
    }

    public static final int STORAGEID_FIELD_NUMBER = 16;
    @SuppressWarnings("serial")
    private volatile java.lang.Object storageId_ = "";
    /**
     * <code>optional string storageId = 16;</code>
     * @return Whether the storageId field is set.
     */
    @java.lang.Override
    public boolean hasStorageId() {
      return ((bitField0_ & 0x00001000) != 0);
    }
    /**
     * <code>optional string storageId = 16;</code>
     * @return The storageId.
     */
    @java.lang.Override
    public java.lang.String getStorageId() {
      java.lang.Object ref = storageId_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          storageId_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string storageId = 16;</code>
     * @return The bytes for storageId.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getStorageIdBytes() {
      java.lang.Object ref = storageId_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        storageId_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int TARGETSTORAGEIDS_FIELD_NUMBER = 17;
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList targetStorageIds_ =
        org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
    /**
     * <code>repeated string targetStorageIds = 17;</code>
     * @return A list containing the targetStorageIds.
     */
    public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
        getTargetStorageIdsList() {
      return targetStorageIds_;
    }
    /**
     * <code>repeated string targetStorageIds = 17;</code>
     * @return The count of targetStorageIds.
     */
    public int getTargetStorageIdsCount() {
      return targetStorageIds_.size();
    }
    /**
     * <code>repeated string targetStorageIds = 17;</code>
     * @param index The index of the element to return.
     * @return The targetStorageIds at the given index.
     */
    public java.lang.String getTargetStorageIds(int index) {
      return targetStorageIds_.get(index);
    }
    /**
     * <code>repeated string targetStorageIds = 17;</code>
     * @param index The index of the value to return.
     * @return The bytes of the targetStorageIds at the given index.
     */
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getTargetStorageIdsBytes(int index) {
      return targetStorageIds_.getByteString(index);
    }
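
    // isInitialized() below caches its verdict in memoizedIsInitialized:
    // -1 means not yet computed, 0 means some required field is missing, 1 means
    // all required fields (including those of nested messages) are present.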

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasHeader()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasStage()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasPipelineSize()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasMinBytesRcvd()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasMaxBytesRcvd()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasLatestGenerationStamp()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasRequestedChecksum()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getHeader().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      for (int i = 0; i < getTargetsCount(); i++) {
        if (!getTargets(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasSource()) {
        if (!getSource().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (!getRequestedChecksum().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getHeader());
      }
      for (int i = 0; i < targets_.size(); i++) {
        output.writeMessage(2, targets_.get(i));
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeMessage(3, getSource());
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeEnum(4, stage_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeUInt32(5, pipelineSize_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeUInt64(6, minBytesRcvd_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        output.writeUInt64(7, maxBytesRcvd_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        output.writeUInt64(8, latestGenerationStamp_);
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        output.writeMessage(9, getRequestedChecksum());
      }
      if (((bitField0_ & 0x00000100) != 0)) {
        output.writeMessage(10, getCachingStrategy());
      }
      if (((bitField0_ & 0x00000200) != 0)) {
        output.writeEnum(11, storageType_);
      }
      for (int i = 0; i < targetStorageTypes_.size(); i++) {
        output.writeEnum(12, targetStorageTypes_.get(i));
      }
      if (((bitField0_ & 0x00000400) != 0)) {
        output.writeBool(13, allowLazyPersist_);
      }
      if (((bitField0_ & 0x00000800) != 0)) {
        output.writeBool(14, pinning_);
      }
      for (int i = 0; i < targetPinnings_.size(); i++) {
        output.writeBool(15, targetPinnings_.getBoolean(i));
      }
      if (((bitField0_ & 0x00001000) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 16, storageId_);
      }
      for (int i = 0; i < targetStorageIds_.size(); i++) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 17, targetStorageIds_.getRaw(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getHeader());
      }
      for (int i = 0; i < targets_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(2, targets_.get(i));
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(3, getSource());
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(4, stage_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(5, pipelineSize_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(6, minBytesRcvd_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(7, maxBytesRcvd_);
      }
      if (((bitField0_ & 0x00000040) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(8, latestGenerationStamp_);
      }
      if (((bitField0_ & 0x00000080) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(9, getRequestedChecksum());
      }
      if (((bitField0_ & 0x00000100) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(10, getCachingStrategy());
      }
      if (((bitField0_ & 0x00000200) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(11, storageType_);
      }
      {
        int dataSize = 0;
        for (int i = 0; i < targetStorageTypes_.size(); i++) {
          dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeEnumSizeNoTag(targetStorageTypes_.get(i));
        }
        size += dataSize;
        size += 1 * targetStorageTypes_.size();
      }
      if (((bitField0_ & 0x00000400) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(13, allowLazyPersist_);
      }
      if (((bitField0_ & 0x00000800) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(14, pinning_);
      }
      {
        int dataSize = 0;
        dataSize = 1 * getTargetPinningsList().size();
        size += dataSize;
        size += 1 * getTargetPinningsList().size();
      }
      if (((bitField0_ & 0x00001000) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(16, storageId_);
      }
      {
        int dataSize = 0;
        for (int i = 0; i < targetStorageIds_.size(); i++) {
          dataSize += computeStringSizeNoTag(targetStorageIds_.getRaw(i));
        }
        size += dataSize;
        size += 2 * getTargetStorageIdsList().size();
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }
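    // Note on getSerializedSize: each field contributes tag bytes plus payload bytes.
    // Fields numbered 1-15 need a single tag byte (hence the "1 *" multipliers for
    // targetStorageTypes and targetPinnings), while fields 16 and 17 need two tag bytes
    // (hence "2 *" for targetStorageIds). The result is memoized in memoizedSize.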

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto) obj;

      if (hasHeader() != other.hasHeader()) return false;
      if (hasHeader()) {
        if (!getHeader()
            .equals(other.getHeader())) return false;
      }
      if (!getTargetsList()
          .equals(other.getTargetsList())) return false;
      if (hasSource() != other.hasSource()) return false;
      if (hasSource()) {
        if (!getSource()
            .equals(other.getSource())) return false;
      }
      if (hasStage() != other.hasStage()) return false;
      if (hasStage()) {
        if (stage_ != other.stage_) return false;
      }
      if (hasPipelineSize() != other.hasPipelineSize()) return false;
      if (hasPipelineSize()) {
        if (getPipelineSize()
            != other.getPipelineSize()) return false;
      }
      if (hasMinBytesRcvd() != other.hasMinBytesRcvd()) return false;
      if (hasMinBytesRcvd()) {
        if (getMinBytesRcvd()
            != other.getMinBytesRcvd()) return false;
      }
      if (hasMaxBytesRcvd() != other.hasMaxBytesRcvd()) return false;
      if (hasMaxBytesRcvd()) {
        if (getMaxBytesRcvd()
            != other.getMaxBytesRcvd()) return false;
      }
      if (hasLatestGenerationStamp() != other.hasLatestGenerationStamp()) return false;
      if (hasLatestGenerationStamp()) {
        if (getLatestGenerationStamp()
            != other.getLatestGenerationStamp()) return false;
      }
      if (hasRequestedChecksum() != other.hasRequestedChecksum()) return false;
      if (hasRequestedChecksum()) {
        if (!getRequestedChecksum()
            .equals(other.getRequestedChecksum())) return false;
      }
      if (hasCachingStrategy() != other.hasCachingStrategy()) return false;
      if (hasCachingStrategy()) {
        if (!getCachingStrategy()
            .equals(other.getCachingStrategy())) return false;
      }
      if (hasStorageType() != other.hasStorageType()) return false;
      if (hasStorageType()) {
        if (storageType_ != other.storageType_) return false;
      }
      if (!targetStorageTypes_.equals(other.targetStorageTypes_)) return false;
      if (hasAllowLazyPersist() != other.hasAllowLazyPersist()) return false;
      if (hasAllowLazyPersist()) {
        if (getAllowLazyPersist()
            != other.getAllowLazyPersist()) return false;
      }
      if (hasPinning() != other.hasPinning()) return false;
      if (hasPinning()) {
        if (getPinning()
            != other.getPinning()) return false;
      }
      if (!getTargetPinningsList()
          .equals(other.getTargetPinningsList())) return false;
      if (hasStorageId() != other.hasStorageId()) return false;
      if (hasStorageId()) {
        if (!getStorageId()
            .equals(other.getStorageId())) return false;
      }
      if (!getTargetStorageIdsList()
          .equals(other.getTargetStorageIdsList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasHeader()) {
        hash = (37 * hash) + HEADER_FIELD_NUMBER;
        hash = (53 * hash) + getHeader().hashCode();
      }
      if (getTargetsCount() > 0) {
        hash = (37 * hash) + TARGETS_FIELD_NUMBER;
        hash = (53 * hash) + getTargetsList().hashCode();
      }
      if (hasSource()) {
        hash = (37 * hash) + SOURCE_FIELD_NUMBER;
        hash = (53 * hash) + getSource().hashCode();
      }
      if (hasStage()) {
        hash = (37 * hash) + STAGE_FIELD_NUMBER;
        hash = (53 * hash) + stage_;
      }
      if (hasPipelineSize()) {
        hash = (37 * hash) + PIPELINESIZE_FIELD_NUMBER;
        hash = (53 * hash) + getPipelineSize();
      }
      if (hasMinBytesRcvd()) {
        hash = (37 * hash) + MINBYTESRCVD_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getMinBytesRcvd());
      }
      if (hasMaxBytesRcvd()) {
        hash = (37 * hash) + MAXBYTESRCVD_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getMaxBytesRcvd());
      }
      if (hasLatestGenerationStamp()) {
        hash = (37 * hash) + LATESTGENERATIONSTAMP_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getLatestGenerationStamp());
      }
      if (hasRequestedChecksum()) {
        hash = (37 * hash) + REQUESTEDCHECKSUM_FIELD_NUMBER;
        hash = (53 * hash) + getRequestedChecksum().hashCode();
      }
      if (hasCachingStrategy()) {
        hash = (37 * hash) + CACHINGSTRATEGY_FIELD_NUMBER;
        hash = (53 * hash) + getCachingStrategy().hashCode();
      }
      if (hasStorageType()) {
        hash = (37 * hash) + STORAGETYPE_FIELD_NUMBER;
        hash = (53 * hash) + storageType_;
      }
      if (getTargetStorageTypesCount() > 0) {
        hash = (37 * hash) + TARGETSTORAGETYPES_FIELD_NUMBER;
        hash = (53 * hash) + targetStorageTypes_.hashCode();
      }
      if (hasAllowLazyPersist()) {
        hash = (37 * hash) + ALLOWLAZYPERSIST_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getAllowLazyPersist());
      }
      if (hasPinning()) {
        hash = (37 * hash) + PINNING_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getPinning());
      }
      if (getTargetPinningsCount() > 0) {
        hash = (37 * hash) + TARGETPINNINGS_FIELD_NUMBER;
        hash = (53 * hash) + getTargetPinningsList().hashCode();
      }
      if (hasStorageId()) {
        hash = (37 * hash) + STORAGEID_FIELD_NUMBER;
        hash = (53 * hash) + getStorageId().hashCode();
      }
      if (getTargetStorageIdsCount() > 0) {
        hash = (37 * hash) + TARGETSTORAGEIDS_FIELD_NUMBER;
        hash = (53 * hash) + getTargetStorageIdsList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
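    // Note on hashCode: each set field is mixed in as (37 * hash) + FIELD_NUMBER followed by
    // (53 * hash) + fieldValueHash, consistent with the equals() contract above, and the
    // result is memoized in memoizedHashCode since the message is immutable.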

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
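    // Note on parsing: the parseFrom overloads read a bare message, while the
    // parseDelimitedFrom overloads expect a varint length prefix before the message,
    // matching writeDelimitedTo on the writer side.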

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
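    // Illustrative sketch (not part of the generated API surface): constructing an
    // OpWriteBlockProto via its Builder. The field values are placeholders, and the
    // header and checksum sub-messages are assumed to be built elsewhere.
    //
    //   OpWriteBlockProto op = OpWriteBlockProto.newBuilder()
    //       .setHeader(header)                      // required
    //       .setStage(BlockConstructionStage.PIPELINE_SETUP_APPEND)
    //       .setPipelineSize(3)
    //       .setMinBytesRcvd(0L)
    //       .setMaxBytesRcvd(0L)
    //       .setLatestGenerationStamp(1001L)
    //       .setRequestedChecksum(checksum)         // required
    //       .build();                               // throws if a required field is unset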
    /**
     * Protobuf type {@code hadoop.hdfs.OpWriteBlockProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpWriteBlockProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpWriteBlockProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getHeaderFieldBuilder();
          getTargetsFieldBuilder();
          getSourceFieldBuilder();
          getRequestedChecksumFieldBuilder();
          getCachingStrategyFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        header_ = null;
        if (headerBuilder_ != null) {
          headerBuilder_.dispose();
          headerBuilder_ = null;
        }
        if (targetsBuilder_ == null) {
          targets_ = java.util.Collections.emptyList();
        } else {
          targets_ = null;
          targetsBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        source_ = null;
        if (sourceBuilder_ != null) {
          sourceBuilder_.dispose();
          sourceBuilder_ = null;
        }
        stage_ = 0;
        pipelineSize_ = 0;
        minBytesRcvd_ = 0L;
        maxBytesRcvd_ = 0L;
        latestGenerationStamp_ = 0L;
        requestedChecksum_ = null;
        if (requestedChecksumBuilder_ != null) {
          requestedChecksumBuilder_.dispose();
          requestedChecksumBuilder_ = null;
        }
        cachingStrategy_ = null;
        if (cachingStrategyBuilder_ != null) {
          cachingStrategyBuilder_.dispose();
          cachingStrategyBuilder_ = null;
        }
        storageType_ = 1;
        targetStorageTypes_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000800);
        allowLazyPersist_ = false;
        pinning_ = false;
        targetPinnings_ = emptyBooleanList();
        storageId_ = "";
        targetStorageIds_ =
            org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result) {
        if (targetsBuilder_ == null) {
          if (((bitField0_ & 0x00000002) != 0)) {
            targets_ = java.util.Collections.unmodifiableList(targets_);
            bitField0_ = (bitField0_ & ~0x00000002);
          }
          result.targets_ = targets_;
        } else {
          result.targets_ = targetsBuilder_.build();
        }
        if (((bitField0_ & 0x00000800) != 0)) {
          targetStorageTypes_ = java.util.Collections.unmodifiableList(targetStorageTypes_);
          bitField0_ = (bitField0_ & ~0x00000800);
        }
        result.targetStorageTypes_ = targetStorageTypes_;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.header_ = headerBuilder_ == null
              ? header_
              : headerBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.source_ = sourceBuilder_ == null
              ? source_
              : sourceBuilder_.build();
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.stage_ = stage_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.pipelineSize_ = pipelineSize_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.minBytesRcvd_ = minBytesRcvd_;
          to_bitField0_ |= 0x00000010;
        }
        if (((from_bitField0_ & 0x00000040) != 0)) {
          result.maxBytesRcvd_ = maxBytesRcvd_;
          to_bitField0_ |= 0x00000020;
        }
        if (((from_bitField0_ & 0x00000080) != 0)) {
          result.latestGenerationStamp_ = latestGenerationStamp_;
          to_bitField0_ |= 0x00000040;
        }
        if (((from_bitField0_ & 0x00000100) != 0)) {
          result.requestedChecksum_ = requestedChecksumBuilder_ == null
              ? requestedChecksum_
              : requestedChecksumBuilder_.build();
          to_bitField0_ |= 0x00000080;
        }
        if (((from_bitField0_ & 0x00000200) != 0)) {
          result.cachingStrategy_ = cachingStrategyBuilder_ == null
              ? cachingStrategy_
              : cachingStrategyBuilder_.build();
          to_bitField0_ |= 0x00000100;
        }
        if (((from_bitField0_ & 0x00000400) != 0)) {
          result.storageType_ = storageType_;
          to_bitField0_ |= 0x00000200;
        }
        if (((from_bitField0_ & 0x00001000) != 0)) {
          result.allowLazyPersist_ = allowLazyPersist_;
          to_bitField0_ |= 0x00000400;
        }
        if (((from_bitField0_ & 0x00002000) != 0)) {
          result.pinning_ = pinning_;
          to_bitField0_ |= 0x00000800;
        }
        if (((from_bitField0_ & 0x00004000) != 0)) {
          targetPinnings_.makeImmutable();
          result.targetPinnings_ = targetPinnings_;
        }
        if (((from_bitField0_ & 0x00008000) != 0)) {
          result.storageId_ = storageId_;
          to_bitField0_ |= 0x00001000;
        }
        if (((from_bitField0_ & 0x00010000) != 0)) {
          targetStorageIds_.makeImmutable();
          result.targetStorageIds_ = targetStorageIds_;
        }
        result.bitField0_ |= to_bitField0_;
      }
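      // Note on buildPartial0: builder presence bits (from_bitField0_) are remapped to the
      // message's presence bits (to_bitField0_); the positions differ because the builder
      // reserves extra bits for repeated fields such as targets and targetStorageTypes,
      // which carry no presence bit in the built message.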

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.getDefaultInstance()) return this;
        if (other.hasHeader()) {
          mergeHeader(other.getHeader());
        }
        if (targetsBuilder_ == null) {
          if (!other.targets_.isEmpty()) {
            if (targets_.isEmpty()) {
              targets_ = other.targets_;
              bitField0_ = (bitField0_ & ~0x00000002);
            } else {
              ensureTargetsIsMutable();
              targets_.addAll(other.targets_);
            }
            onChanged();
          }
        } else {
          if (!other.targets_.isEmpty()) {
            if (targetsBuilder_.isEmpty()) {
              targetsBuilder_.dispose();
              targetsBuilder_ = null;
              targets_ = other.targets_;
              bitField0_ = (bitField0_ & ~0x00000002);
              targetsBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getTargetsFieldBuilder() : null;
            } else {
              targetsBuilder_.addAllMessages(other.targets_);
            }
          }
        }
        if (other.hasSource()) {
          mergeSource(other.getSource());
        }
        if (other.hasStage()) {
          setStage(other.getStage());
        }
        if (other.hasPipelineSize()) {
          setPipelineSize(other.getPipelineSize());
        }
        if (other.hasMinBytesRcvd()) {
          setMinBytesRcvd(other.getMinBytesRcvd());
        }
        if (other.hasMaxBytesRcvd()) {
          setMaxBytesRcvd(other.getMaxBytesRcvd());
        }
        if (other.hasLatestGenerationStamp()) {
          setLatestGenerationStamp(other.getLatestGenerationStamp());
        }
        if (other.hasRequestedChecksum()) {
          mergeRequestedChecksum(other.getRequestedChecksum());
        }
        if (other.hasCachingStrategy()) {
          mergeCachingStrategy(other.getCachingStrategy());
        }
        if (other.hasStorageType()) {
          setStorageType(other.getStorageType());
        }
        if (!other.targetStorageTypes_.isEmpty()) {
          if (targetStorageTypes_.isEmpty()) {
            targetStorageTypes_ = other.targetStorageTypes_;
            bitField0_ = (bitField0_ & ~0x00000800);
          } else {
            ensureTargetStorageTypesIsMutable();
            targetStorageTypes_.addAll(other.targetStorageTypes_);
          }
          onChanged();
        }
        if (other.hasAllowLazyPersist()) {
          setAllowLazyPersist(other.getAllowLazyPersist());
        }
        if (other.hasPinning()) {
          setPinning(other.getPinning());
        }
        if (!other.targetPinnings_.isEmpty()) {
          if (targetPinnings_.isEmpty()) {
            targetPinnings_ = other.targetPinnings_;
            targetPinnings_.makeImmutable();
            bitField0_ |= 0x00004000;
          } else {
            ensureTargetPinningsIsMutable();
            targetPinnings_.addAll(other.targetPinnings_);
          }
          onChanged();
        }
        if (other.hasStorageId()) {
          storageId_ = other.storageId_;
          bitField0_ |= 0x00008000;
          onChanged();
        }
        if (!other.targetStorageIds_.isEmpty()) {
          if (targetStorageIds_.isEmpty()) {
            targetStorageIds_ = other.targetStorageIds_;
            bitField0_ |= 0x00010000;
          } else {
            ensureTargetStorageIdsIsMutable();
            targetStorageIds_.addAll(other.targetStorageIds_);
          }
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }
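      // Note on mergeFrom(other): singular sub-messages (header, source, requestedChecksum,
      // cachingStrategy) are merged recursively, scalar fields are overwritten only when set
      // on other, and repeated fields are appended rather than replaced.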

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasHeader()) {
          return false;
        }
        if (!hasStage()) {
          return false;
        }
        if (!hasPipelineSize()) {
          return false;
        }
        if (!hasMinBytesRcvd()) {
          return false;
        }
        if (!hasMaxBytesRcvd()) {
          return false;
        }
        if (!hasLatestGenerationStamp()) {
          return false;
        }
        if (!hasRequestedChecksum()) {
          return false;
        }
        if (!getHeader().isInitialized()) {
          return false;
        }
        for (int i = 0; i < getTargetsCount(); i++) {
          if (!getTargets(i).isInitialized()) {
            return false;
          }
        }
        if (hasSource()) {
          if (!getSource().isInitialized()) {
            return false;
          }
        }
        if (!getRequestedChecksum().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getHeaderFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER,
                        extensionRegistry);
                if (targetsBuilder_ == null) {
                  ensureTargetsIsMutable();
                  targets_.add(m);
                } else {
                  targetsBuilder_.addMessage(m);
                }
                break;
              } // case 18
              case 26: {
                input.readMessage(
                    getSourceFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              case 32: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(4, tmpRaw);
                } else {
                  stage_ = tmpRaw;
                  bitField0_ |= 0x00000008;
                }
                break;
              } // case 32
              case 40: {
                pipelineSize_ = input.readUInt32();
                bitField0_ |= 0x00000010;
                break;
              } // case 40
              case 48: {
                minBytesRcvd_ = input.readUInt64();
                bitField0_ |= 0x00000020;
                break;
              } // case 48
              case 56: {
                maxBytesRcvd_ = input.readUInt64();
                bitField0_ |= 0x00000040;
                break;
              } // case 56
              case 64: {
                latestGenerationStamp_ = input.readUInt64();
                bitField0_ |= 0x00000080;
                break;
              } // case 64
              case 74: {
                input.readMessage(
                    getRequestedChecksumFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000100;
                break;
              } // case 74
              case 82: {
                input.readMessage(
                    getCachingStrategyFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000200;
                break;
              } // case 82
              case 88: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(11, tmpRaw);
                } else {
                  storageType_ = tmpRaw;
                  bitField0_ |= 0x00000400;
                }
                break;
              } // case 88
              case 96: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(12, tmpRaw);
                } else {
                  ensureTargetStorageTypesIsMutable();
                  targetStorageTypes_.add(tmpRaw);
                }
                break;
              } // case 96
              case 98: {
                int length = input.readRawVarint32();
                int oldLimit = input.pushLimit(length);
                while(input.getBytesUntilLimit() > 0) {
                  int tmpRaw = input.readEnum();
                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue =
                      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw);
                  if (tmpValue == null) {
                    mergeUnknownVarintField(12, tmpRaw);
                  } else {
                    ensureTargetStorageTypesIsMutable();
                    targetStorageTypes_.add(tmpRaw);
                  }
                }
                input.popLimit(oldLimit);
                break;
              } // case 98
              case 104: {
                allowLazyPersist_ = input.readBool();
                bitField0_ |= 0x00001000;
                break;
              } // case 104
              case 112: {
                pinning_ = input.readBool();
                bitField0_ |= 0x00002000;
                break;
              } // case 112
              case 120: {
                boolean v = input.readBool();
                ensureTargetPinningsIsMutable();
                targetPinnings_.addBoolean(v);
                break;
              } // case 120
              case 122: {
                int length = input.readRawVarint32();
                int limit = input.pushLimit(length);
                int alloc = length > 4096 ? 4096 : length;
                ensureTargetPinningsIsMutable(alloc / 1);
                while (input.getBytesUntilLimit() > 0) {
                  targetPinnings_.addBoolean(input.readBool());
                }
                input.popLimit(limit);
                break;
              } // case 122
              case 130: {
                storageId_ = input.readBytes();
                bitField0_ |= 0x00008000;
                break;
              } // case 130
              case 138: {
                org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
                ensureTargetStorageIdsIsMutable();
                targetStorageIds_.add(bs);
                break;
              } // case 138
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
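      // Note on the wire-format switch above: each case label is the field's tag,
      // (fieldNumber << 3) | wireType. For example, case 10 is field 1 (header, length-delimited)
      // and case 32 is field 4 (stage, varint). Repeated enum/bool fields accept both the
      // unpacked form (cases 96, 120) and the packed, length-delimited form (cases 98, 122).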
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> headerBuilder_;
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       * @return Whether the header field is set.
       */
      public boolean hasHeader() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       * @return The header.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
        if (headerBuilder_ == null) {
          return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
        } else {
          return headerBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       */
      public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
        if (headerBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          header_ = value;
        } else {
          headerBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       */
      public Builder setHeader(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder builderForValue) {
        if (headerBuilder_ == null) {
          header_ = builderForValue.build();
        } else {
          headerBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       */
      public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
        if (headerBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            header_ != null &&
            header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) {
            getHeaderBuilder().mergeFrom(value);
          } else {
            header_ = value;
          }
        } else {
          headerBuilder_.mergeFrom(value);
        }
        if (header_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       */
      public Builder clearHeader() {
        bitField0_ = (bitField0_ & ~0x00000001);
        header_ = null;
        if (headerBuilder_ != null) {
          headerBuilder_.dispose();
          headerBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder getHeaderBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getHeaderFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
        if (headerBuilder_ != null) {
          return headerBuilder_.getMessageOrBuilder();
        } else {
          return header_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> 
          getHeaderFieldBuilder() {
        if (headerBuilder_ == null) {
          headerBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>(
                  getHeader(),
                  getParentForChildren(),
                  isClean());
          header_ = null;
        }
        return headerBuilder_;
      }

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> targets_ =
        java.util.Collections.emptyList();
      private void ensureTargetsIsMutable() {
        if (!((bitField0_ & 0x00000002) != 0)) {
          targets_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>(targets_);
          bitField0_ |= 0x00000002;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> targetsBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getTargetsList() {
        if (targetsBuilder_ == null) {
          return java.util.Collections.unmodifiableList(targets_);
        } else {
          return targetsBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public int getTargetsCount() {
        if (targetsBuilder_ == null) {
          return targets_.size();
        } else {
          return targetsBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) {
        if (targetsBuilder_ == null) {
          return targets_.get(index);
        } else {
          return targetsBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public Builder setTargets(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
        if (targetsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureTargetsIsMutable();
          targets_.set(index, value);
          onChanged();
        } else {
          targetsBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public Builder setTargets(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
        if (targetsBuilder_ == null) {
          ensureTargetsIsMutable();
          targets_.set(index, builderForValue.build());
          onChanged();
        } else {
          targetsBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public Builder addTargets(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
        if (targetsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureTargetsIsMutable();
          targets_.add(value);
          onChanged();
        } else {
          targetsBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public Builder addTargets(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
        if (targetsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureTargetsIsMutable();
          targets_.add(index, value);
          onChanged();
        } else {
          targetsBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public Builder addTargets(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
        if (targetsBuilder_ == null) {
          ensureTargetsIsMutable();
          targets_.add(builderForValue.build());
          onChanged();
        } else {
          targetsBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public Builder addTargets(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
        if (targetsBuilder_ == null) {
          ensureTargetsIsMutable();
          targets_.add(index, builderForValue.build());
          onChanged();
        } else {
          targetsBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public Builder addAllTargets(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> values) {
        if (targetsBuilder_ == null) {
          ensureTargetsIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, targets_);
          onChanged();
        } else {
          targetsBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public Builder clearTargets() {
        if (targetsBuilder_ == null) {
          targets_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000002);
          onChanged();
        } else {
          targetsBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public Builder removeTargets(int index) {
        if (targetsBuilder_ == null) {
          ensureTargetsIsMutable();
          targets_.remove(index);
          onChanged();
        } else {
          targetsBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getTargetsBuilder(
          int index) {
        return getTargetsFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
          int index) {
        if (targetsBuilder_ == null) {
          return targets_.get(index);
        } else {
          return targetsBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> 
           getTargetsOrBuilderList() {
        if (targetsBuilder_ != null) {
          return targetsBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(targets_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder() {
        return getTargetsFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder(
          int index) {
        return getTargetsFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder> 
           getTargetsBuilderList() {
        return getTargetsFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> 
          getTargetsFieldBuilder() {
        if (targetsBuilder_ == null) {
          targetsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
                  targets_,
                  ((bitField0_ & 0x00000002) != 0),
                  getParentForChildren(),
                  isClean());
          targets_ = null;
        }
        return targetsBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> sourceBuilder_;
      /**
       * <code>optional .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
       * @return Whether the source field is set.
       */
      public boolean hasSource() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
       * @return The source.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() {
        if (sourceBuilder_ == null) {
          return source_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance() : source_;
        } else {
          return sourceBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
       */
      public Builder setSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
        if (sourceBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          source_ = value;
        } else {
          sourceBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
       */
      public Builder setSource(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
        if (sourceBuilder_ == null) {
          source_ = builderForValue.build();
        } else {
          sourceBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
       */
      public Builder mergeSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
        if (sourceBuilder_ == null) {
          if (((bitField0_ & 0x00000004) != 0) &&
            source_ != null &&
            source_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()) {
            getSourceBuilder().mergeFrom(value);
          } else {
            source_ = value;
          }
        } else {
          sourceBuilder_.mergeFrom(value);
        }
        if (source_ != null) {
          bitField0_ |= 0x00000004;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
       */
      public Builder clearSource() {
        bitField0_ = (bitField0_ & ~0x00000004);
        source_ = null;
        if (sourceBuilder_ != null) {
          sourceBuilder_.dispose();
          sourceBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getSourceBuilder() {
        bitField0_ |= 0x00000004;
        onChanged();
        return getSourceFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() {
        if (sourceBuilder_ != null) {
          return sourceBuilder_.getMessageOrBuilder();
        } else {
          return source_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance() : source_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> 
          getSourceFieldBuilder() {
        if (sourceBuilder_ == null) {
          sourceBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
                  getSource(),
                  getParentForChildren(),
                  isClean());
          source_ = null;
        }
        return sourceBuilder_;
      }

      private int stage_ = 0;
      /**
       * <code>required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;</code>
       * @return Whether the stage field is set.
       */
      @java.lang.Override public boolean hasStage() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;</code>
       * @return The stage.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage getStage() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.forNumber(stage_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND : result;
      }
      /**
       * <code>required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;</code>
       * @param value The stage to set.
       * @return This builder for chaining.
       */
      public Builder setStage(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto.BlockConstructionStage value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000008;
        stage_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.OpWriteBlockProto.BlockConstructionStage stage = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearStage() {
        bitField0_ = (bitField0_ & ~0x00000008);
        stage_ = 0;
        onChanged();
        return this;
      }

      private int pipelineSize_ ;
      /**
       * <code>required uint32 pipelineSize = 5;</code>
       * @return Whether the pipelineSize field is set.
       */
      @java.lang.Override
      public boolean hasPipelineSize() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>required uint32 pipelineSize = 5;</code>
       * @return The pipelineSize.
       */
      @java.lang.Override
      public int getPipelineSize() {
        return pipelineSize_;
      }
      /**
       * <code>required uint32 pipelineSize = 5;</code>
       * @param value The pipelineSize to set.
       * @return This builder for chaining.
       */
      public Builder setPipelineSize(int value) {

        pipelineSize_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 pipelineSize = 5;</code>
       * @return This builder for chaining.
       */
      public Builder clearPipelineSize() {
        bitField0_ = (bitField0_ & ~0x00000010);
        pipelineSize_ = 0;
        onChanged();
        return this;
      }

      private long minBytesRcvd_ ;
      /**
       * <code>required uint64 minBytesRcvd = 6;</code>
       * @return Whether the minBytesRcvd field is set.
       */
      @java.lang.Override
      public boolean hasMinBytesRcvd() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>required uint64 minBytesRcvd = 6;</code>
       * @return The minBytesRcvd.
       */
      @java.lang.Override
      public long getMinBytesRcvd() {
        return minBytesRcvd_;
      }
      /**
       * <code>required uint64 minBytesRcvd = 6;</code>
       * @param value The minBytesRcvd to set.
       * @return This builder for chaining.
       */
      public Builder setMinBytesRcvd(long value) {

        minBytesRcvd_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 minBytesRcvd = 6;</code>
       * @return This builder for chaining.
       */
      public Builder clearMinBytesRcvd() {
        bitField0_ = (bitField0_ & ~0x00000020);
        minBytesRcvd_ = 0L;
        onChanged();
        return this;
      }

      private long maxBytesRcvd_ ;
      /**
       * <code>required uint64 maxBytesRcvd = 7;</code>
       * @return Whether the maxBytesRcvd field is set.
       */
      @java.lang.Override
      public boolean hasMaxBytesRcvd() {
        return ((bitField0_ & 0x00000040) != 0);
      }
      /**
       * <code>required uint64 maxBytesRcvd = 7;</code>
       * @return The maxBytesRcvd.
       */
      @java.lang.Override
      public long getMaxBytesRcvd() {
        return maxBytesRcvd_;
      }
      /**
       * <code>required uint64 maxBytesRcvd = 7;</code>
       * @param value The maxBytesRcvd to set.
       * @return This builder for chaining.
       */
      public Builder setMaxBytesRcvd(long value) {

        maxBytesRcvd_ = value;
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 maxBytesRcvd = 7;</code>
       * @return This builder for chaining.
       */
      public Builder clearMaxBytesRcvd() {
        bitField0_ = (bitField0_ & ~0x00000040);
        maxBytesRcvd_ = 0L;
        onChanged();
        return this;
      }

      private long latestGenerationStamp_ ;
      /**
       * <code>required uint64 latestGenerationStamp = 8;</code>
       * @return Whether the latestGenerationStamp field is set.
       */
      @java.lang.Override
      public boolean hasLatestGenerationStamp() {
        return ((bitField0_ & 0x00000080) != 0);
      }
      /**
       * <code>required uint64 latestGenerationStamp = 8;</code>
       * @return The latestGenerationStamp.
       */
      @java.lang.Override
      public long getLatestGenerationStamp() {
        return latestGenerationStamp_;
      }
      /**
       * <code>required uint64 latestGenerationStamp = 8;</code>
       * @param value The latestGenerationStamp to set.
       * @return This builder for chaining.
       */
      public Builder setLatestGenerationStamp(long value) {

        latestGenerationStamp_ = value;
        bitField0_ |= 0x00000080;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 latestGenerationStamp = 8;</code>
       * @return This builder for chaining.
       */
      public Builder clearLatestGenerationStamp() {
        bitField0_ = (bitField0_ & ~0x00000080);
        latestGenerationStamp_ = 0L;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto requestedChecksum_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder> requestedChecksumBuilder_;
      /**
       * <pre>
       **
       * The requested checksum mechanism for this block write.
       * </pre>
       *
       * <code>required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;</code>
       * @return Whether the requestedChecksum field is set.
       */
      public boolean hasRequestedChecksum() {
        return ((bitField0_ & 0x00000100) != 0);
      }
      /**
       * <pre>
       **
       * The requested checksum mechanism for this block write.
       * </pre>
       *
       * <code>required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;</code>
       * @return The requestedChecksum.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getRequestedChecksum() {
        if (requestedChecksumBuilder_ == null) {
          return requestedChecksum_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance() : requestedChecksum_;
        } else {
          return requestedChecksumBuilder_.getMessage();
        }
      }
      /**
       * <pre>
       **
       * The requested checksum mechanism for this block write.
       * </pre>
       *
       * <code>required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;</code>
       */
      public Builder setRequestedChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) {
        if (requestedChecksumBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          requestedChecksum_ = value;
        } else {
          requestedChecksumBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000100;
        onChanged();
        return this;
      }
      /**
       * <pre>
       **
       * The requested checksum mechanism for this block write.
       * </pre>
       *
       * <code>required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;</code>
       */
      public Builder setRequestedChecksum(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder builderForValue) {
        if (requestedChecksumBuilder_ == null) {
          requestedChecksum_ = builderForValue.build();
        } else {
          requestedChecksumBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000100;
        onChanged();
        return this;
      }
      /**
       * <pre>
       **
       * The requested checksum mechanism for this block write.
       * </pre>
       *
       * <code>required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;</code>
       */
      public Builder mergeRequestedChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) {
        if (requestedChecksumBuilder_ == null) {
          if (((bitField0_ & 0x00000100) != 0) &&
            requestedChecksum_ != null &&
            requestedChecksum_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance()) {
            getRequestedChecksumBuilder().mergeFrom(value);
          } else {
            requestedChecksum_ = value;
          }
        } else {
          requestedChecksumBuilder_.mergeFrom(value);
        }
        if (requestedChecksum_ != null) {
          bitField0_ |= 0x00000100;
          onChanged();
        }
        return this;
      }
      /**
       * <pre>
       **
       * The requested checksum mechanism for this block write.
       * </pre>
       *
       * <code>required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;</code>
       */
      public Builder clearRequestedChecksum() {
        bitField0_ = (bitField0_ & ~0x00000100);
        requestedChecksum_ = null;
        if (requestedChecksumBuilder_ != null) {
          requestedChecksumBuilder_.dispose();
          requestedChecksumBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <pre>
       **
       * The requested checksum mechanism for this block write.
       * </pre>
       *
       * <code>required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder getRequestedChecksumBuilder() {
        bitField0_ |= 0x00000100;
        onChanged();
        return getRequestedChecksumFieldBuilder().getBuilder();
      }
      /**
       * <pre>
       **
       * The requested checksum mechanism for this block write.
       * </pre>
       *
       * <code>required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getRequestedChecksumOrBuilder() {
        if (requestedChecksumBuilder_ != null) {
          return requestedChecksumBuilder_.getMessageOrBuilder();
        } else {
          return requestedChecksum_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance() : requestedChecksum_;
        }
      }
      /**
       * <pre>
       **
       * The requested checksum mechanism for this block write.
       * </pre>
       *
       * <code>required .hadoop.hdfs.ChecksumProto requestedChecksum = 9;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder> 
          getRequestedChecksumFieldBuilder() {
        if (requestedChecksumBuilder_ == null) {
          requestedChecksumBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder>(
                  getRequestedChecksum(),
                  getParentForChildren(),
                  isClean());
          requestedChecksum_ = null;
        }
        return requestedChecksumBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto cachingStrategy_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder> cachingStrategyBuilder_;
      /**
       * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;</code>
       * @return Whether the cachingStrategy field is set.
       */
      public boolean hasCachingStrategy() {
        return ((bitField0_ & 0x00000200) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;</code>
       * @return The cachingStrategy.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto getCachingStrategy() {
        if (cachingStrategyBuilder_ == null) {
          return cachingStrategy_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance() : cachingStrategy_;
        } else {
          return cachingStrategyBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;</code>
       */
      public Builder setCachingStrategy(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto value) {
        if (cachingStrategyBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          cachingStrategy_ = value;
        } else {
          cachingStrategyBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000200;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;</code>
       */
      public Builder setCachingStrategy(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder builderForValue) {
        if (cachingStrategyBuilder_ == null) {
          cachingStrategy_ = builderForValue.build();
        } else {
          cachingStrategyBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000200;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;</code>
       */
      public Builder mergeCachingStrategy(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto value) {
        if (cachingStrategyBuilder_ == null) {
          if (((bitField0_ & 0x00000200) != 0) &&
            cachingStrategy_ != null &&
            cachingStrategy_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance()) {
            getCachingStrategyBuilder().mergeFrom(value);
          } else {
            cachingStrategy_ = value;
          }
        } else {
          cachingStrategyBuilder_.mergeFrom(value);
        }
        if (cachingStrategy_ != null) {
          bitField0_ |= 0x00000200;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;</code>
       */
      public Builder clearCachingStrategy() {
        bitField0_ = (bitField0_ & ~0x00000200);
        cachingStrategy_ = null;
        if (cachingStrategyBuilder_ != null) {
          cachingStrategyBuilder_.dispose();
          cachingStrategyBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder getCachingStrategyBuilder() {
        bitField0_ |= 0x00000200;
        onChanged();
        return getCachingStrategyFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder getCachingStrategyOrBuilder() {
        if (cachingStrategyBuilder_ != null) {
          return cachingStrategyBuilder_.getMessageOrBuilder();
        } else {
          return cachingStrategy_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.getDefaultInstance() : cachingStrategy_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.CachingStrategyProto cachingStrategy = 10;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder> 
          getCachingStrategyFieldBuilder() {
        if (cachingStrategyBuilder_ == null) {
          cachingStrategyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProtoOrBuilder>(
                  getCachingStrategy(),
                  getParentForChildren(),
                  isClean());
          cachingStrategy_ = null;
        }
        return cachingStrategyBuilder_;
      }

      private int storageType_ = 1;
      /**
       * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];</code>
       * @return Whether the storageType field is set.
       */
      @java.lang.Override public boolean hasStorageType() {
        return ((bitField0_ & 0x00000400) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];</code>
       * @return The storageType.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(storageType_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];</code>
       * @param value The storageType to set.
       * @return This builder for chaining.
       */
      public Builder setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000400;
        storageType_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 11 [default = DISK];</code>
       * @return This builder for chaining.
       */
      public Builder clearStorageType() {
        bitField0_ = (bitField0_ & ~0x00000400);
        storageType_ = 1;
        onChanged();
        return this;
      }

      private java.util.List<java.lang.Integer> targetStorageTypes_ =
        java.util.Collections.emptyList();
      private void ensureTargetStorageTypesIsMutable() {
        if (!((bitField0_ & 0x00000800) != 0)) {
          targetStorageTypes_ = new java.util.ArrayList<java.lang.Integer>(targetStorageTypes_);
          bitField0_ |= 0x00000800;
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;</code>
       * @return A list containing the targetStorageTypes.
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getTargetStorageTypesList() {
        return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
            java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(targetStorageTypes_, targetStorageTypes_converter_);
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;</code>
       * @return The count of targetStorageTypes.
       */
      public int getTargetStorageTypesCount() {
        return targetStorageTypes_.size();
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;</code>
       * @param index The index of the element to return.
       * @return The targetStorageTypes at the given index.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getTargetStorageTypes(int index) {
        return targetStorageTypes_converter_.convert(targetStorageTypes_.get(index));
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;</code>
       * @param index The index to set the value at.
       * @param value The targetStorageTypes to set.
       * @return This builder for chaining.
       */
      public Builder setTargetStorageTypes(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureTargetStorageTypesIsMutable();
        targetStorageTypes_.set(index, value.getNumber());
        onChanged();
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;</code>
       * @param value The targetStorageTypes to add.
       * @return This builder for chaining.
       */
      public Builder addTargetStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureTargetStorageTypesIsMutable();
        targetStorageTypes_.add(value.getNumber());
        onChanged();
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;</code>
       * @param values The targetStorageTypes to add.
       * @return This builder for chaining.
       */
      public Builder addAllTargetStorageTypes(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> values) {
        ensureTargetStorageTypesIsMutable();
        for (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value : values) {
          targetStorageTypes_.add(value.getNumber());
        }
        onChanged();
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 12;</code>
       * @return This builder for chaining.
       */
      public Builder clearTargetStorageTypes() {
        targetStorageTypes_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000800);
        onChanged();
        return this;
      }

      private boolean allowLazyPersist_ ;
      /**
       * <pre>
       **
       * Hint to the DataNode that the block can be allocated on transient
       * storage, i.e. memory, and written to disk lazily. The DataNode is free
       * to ignore this hint.
       * </pre>
       *
       * <code>optional bool allowLazyPersist = 13 [default = false];</code>
       * @return Whether the allowLazyPersist field is set.
       */
      @java.lang.Override
      public boolean hasAllowLazyPersist() {
        return ((bitField0_ & 0x00001000) != 0);
      }
      /**
       * <pre>
       **
       * Hint to the DataNode that the block can be allocated on transient
       * storage, i.e. memory, and written to disk lazily. The DataNode is free
       * to ignore this hint.
       * </pre>
       *
       * <code>optional bool allowLazyPersist = 13 [default = false];</code>
       * @return The allowLazyPersist.
       */
      @java.lang.Override
      public boolean getAllowLazyPersist() {
        return allowLazyPersist_;
      }
      /**
       * <pre>
       **
       * Hint to the DataNode that the block can be allocated on transient
       * storage, i.e. memory, and written to disk lazily. The DataNode is free
       * to ignore this hint.
       * </pre>
       *
       * <code>optional bool allowLazyPersist = 13 [default = false];</code>
       * @param value The allowLazyPersist to set.
       * @return This builder for chaining.
       */
      public Builder setAllowLazyPersist(boolean value) {

        allowLazyPersist_ = value;
        bitField0_ |= 0x00001000;
        onChanged();
        return this;
      }
      /**
       * <pre>
       **
       * Hint to the DataNode that the block can be allocated on transient
       * storage, i.e. memory, and written to disk lazily. The DataNode is free
       * to ignore this hint.
       * </pre>
       *
       * <code>optional bool allowLazyPersist = 13 [default = false];</code>
       * @return This builder for chaining.
       */
      public Builder clearAllowLazyPersist() {
        bitField0_ = (bitField0_ & ~0x00001000);
        allowLazyPersist_ = false;
        onChanged();
        return this;
      }

      private boolean pinning_ ;
      /**
       * <pre>
       * Whether to pin the block, so the Balancer won't move it.
       * </pre>
       *
       * <code>optional bool pinning = 14 [default = false];</code>
       * @return Whether the pinning field is set.
       */
      @java.lang.Override
      public boolean hasPinning() {
        return ((bitField0_ & 0x00002000) != 0);
      }
      /**
       * <pre>
       * Whether to pin the block, so the Balancer won't move it.
       * </pre>
       *
       * <code>optional bool pinning = 14 [default = false];</code>
       * @return The pinning.
       */
      @java.lang.Override
      public boolean getPinning() {
        return pinning_;
      }
      /**
       * <pre>
       * Whether to pin the block, so the Balancer won't move it.
       * </pre>
       *
       * <code>optional bool pinning = 14 [default = false];</code>
       * @param value The pinning to set.
       * @return This builder for chaining.
       */
      public Builder setPinning(boolean value) {

        pinning_ = value;
        bitField0_ |= 0x00002000;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * Whether to pin the block, so the Balancer won't move it.
       * </pre>
       *
       * <code>optional bool pinning = 14 [default = false];</code>
       * @return This builder for chaining.
       */
      public Builder clearPinning() {
        bitField0_ = (bitField0_ & ~0x00002000);
        pinning_ = false;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.Internal.BooleanList targetPinnings_ = emptyBooleanList();
      private void ensureTargetPinningsIsMutable() {
        if (!targetPinnings_.isModifiable()) {
          targetPinnings_ = makeMutableCopy(targetPinnings_);
        }
        bitField0_ |= 0x00004000;
      }
      private void ensureTargetPinningsIsMutable(int capacity) {
        if (!targetPinnings_.isModifiable()) {
          targetPinnings_ = makeMutableCopy(targetPinnings_, capacity);
        }
        bitField0_ |= 0x00004000;
      }
      /**
       * <code>repeated bool targetPinnings = 15;</code>
       * @return A list containing the targetPinnings.
       */
      public java.util.List<java.lang.Boolean>
          getTargetPinningsList() {
        targetPinnings_.makeImmutable();
        return targetPinnings_;
      }
      /**
       * <code>repeated bool targetPinnings = 15;</code>
       * @return The count of targetPinnings.
       */
      public int getTargetPinningsCount() {
        return targetPinnings_.size();
      }
      /**
       * <code>repeated bool targetPinnings = 15;</code>
       * @param index The index of the element to return.
       * @return The targetPinnings at the given index.
       */
      public boolean getTargetPinnings(int index) {
        return targetPinnings_.getBoolean(index);
      }
      /**
       * <code>repeated bool targetPinnings = 15;</code>
       * @param index The index to set the value at.
       * @param value The targetPinnings to set.
       * @return This builder for chaining.
       */
      public Builder setTargetPinnings(
          int index, boolean value) {

        ensureTargetPinningsIsMutable();
        targetPinnings_.setBoolean(index, value);
        bitField0_ |= 0x00004000;
        onChanged();
        return this;
      }
      /**
       * <code>repeated bool targetPinnings = 15;</code>
       * @param value The targetPinnings to add.
       * @return This builder for chaining.
       */
      public Builder addTargetPinnings(boolean value) {

        ensureTargetPinningsIsMutable();
        targetPinnings_.addBoolean(value);
        bitField0_ |= 0x00004000;
        onChanged();
        return this;
      }
      /**
       * <code>repeated bool targetPinnings = 15;</code>
       * @param values The targetPinnings to add.
       * @return This builder for chaining.
       */
      public Builder addAllTargetPinnings(
          java.lang.Iterable<? extends java.lang.Boolean> values) {
        ensureTargetPinningsIsMutable();
        org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
            values, targetPinnings_);
        bitField0_ |= 0x00004000;
        onChanged();
        return this;
      }
      /**
       * <code>repeated bool targetPinnings = 15;</code>
       * @return This builder for chaining.
       */
      public Builder clearTargetPinnings() {
        targetPinnings_ = emptyBooleanList();
        bitField0_ = (bitField0_ & ~0x00004000);
        onChanged();
        return this;
      }

      private java.lang.Object storageId_ = "";
      /**
       * <code>optional string storageId = 16;</code>
       * @return Whether the storageId field is set.
       */
      public boolean hasStorageId() {
        return ((bitField0_ & 0x00008000) != 0);
      }
      /**
       * <code>optional string storageId = 16;</code>
       * @return The storageId.
       */
      public java.lang.String getStorageId() {
        java.lang.Object ref = storageId_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            storageId_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string storageId = 16;</code>
       * @return The bytes for storageId.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getStorageIdBytes() {
        java.lang.Object ref = storageId_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          storageId_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string storageId = 16;</code>
       * @param value The storageId to set.
       * @return This builder for chaining.
       */
      public Builder setStorageId(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        storageId_ = value;
        bitField0_ |= 0x00008000;
        onChanged();
        return this;
      }
      /**
       * <code>optional string storageId = 16;</code>
       * @return This builder for chaining.
       */
      public Builder clearStorageId() {
        storageId_ = getDefaultInstance().getStorageId();
        bitField0_ = (bitField0_ & ~0x00008000);
        onChanged();
        return this;
      }
      /**
       * <code>optional string storageId = 16;</code>
       * @param value The bytes for storageId to set.
       * @return This builder for chaining.
       */
      public Builder setStorageIdBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        storageId_ = value;
        bitField0_ |= 0x00008000;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList targetStorageIds_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
      private void ensureTargetStorageIdsIsMutable() {
        if (!targetStorageIds_.isModifiable()) {
          targetStorageIds_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(targetStorageIds_);
        }
        bitField0_ |= 0x00010000;
      }
      /**
       * <code>repeated string targetStorageIds = 17;</code>
       * @return A list containing the targetStorageIds.
       */
      public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
          getTargetStorageIdsList() {
        targetStorageIds_.makeImmutable();
        return targetStorageIds_;
      }
      /**
       * <code>repeated string targetStorageIds = 17;</code>
       * @return The count of targetStorageIds.
       */
      public int getTargetStorageIdsCount() {
        return targetStorageIds_.size();
      }
      /**
       * <code>repeated string targetStorageIds = 17;</code>
       * @param index The index of the element to return.
       * @return The targetStorageIds at the given index.
       */
      public java.lang.String getTargetStorageIds(int index) {
        return targetStorageIds_.get(index);
      }
      /**
       * <code>repeated string targetStorageIds = 17;</code>
       * @param index The index of the value to return.
       * @return The bytes of the targetStorageIds at the given index.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getTargetStorageIdsBytes(int index) {
        return targetStorageIds_.getByteString(index);
      }
      /**
       * <code>repeated string targetStorageIds = 17;</code>
       * @param index The index to set the value at.
       * @param value The targetStorageIds to set.
       * @return This builder for chaining.
       */
      public Builder setTargetStorageIds(
          int index, java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureTargetStorageIdsIsMutable();
        targetStorageIds_.set(index, value);
        bitField0_ |= 0x00010000;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string targetStorageIds = 17;</code>
       * @param value The targetStorageIds to add.
       * @return This builder for chaining.
       */
      public Builder addTargetStorageIds(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureTargetStorageIdsIsMutable();
        targetStorageIds_.add(value);
        bitField0_ |= 0x00010000;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string targetStorageIds = 17;</code>
       * @param values The targetStorageIds to add.
       * @return This builder for chaining.
       */
      public Builder addAllTargetStorageIds(
          java.lang.Iterable<java.lang.String> values) {
        ensureTargetStorageIdsIsMutable();
        org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
            values, targetStorageIds_);
        bitField0_ |= 0x00010000;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string targetStorageIds = 17;</code>
       * @return This builder for chaining.
       */
      public Builder clearTargetStorageIds() {
        targetStorageIds_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        bitField0_ = (bitField0_ & ~0x00010000);
        onChanged();
        return this;
      }
      /**
       * <code>repeated string targetStorageIds = 17;</code>
       * @param value The bytes of the targetStorageIds to add.
       * @return This builder for chaining.
       */
      public Builder addTargetStorageIdsBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        ensureTargetStorageIdsIsMutable();
        targetStorageIds_.add(value);
        bitField0_ |= 0x00010000;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpWriteBlockProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.OpWriteBlockProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpWriteBlockProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpWriteBlockProto>() {
      @java.lang.Override
      public OpWriteBlockProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<OpWriteBlockProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<OpWriteBlockProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
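
  // Illustrative sketch (hypothetical helper, not part of the generated output): one way to
  // populate an OpWriteBlockProto through the Builder methods defined above. Any remaining
  // required fields (such as requestedChecksum, and those defined earlier in the message) are
  // left to the caller, so buildPartial() is used instead of build() to avoid the
  // required-field check in this sketch.
  private static OpWriteBlockProto exampleOpWriteBlock() {
    return OpWriteBlockProto.newBuilder()
        .setStage(OpWriteBlockProto.BlockConstructionStage.PIPELINE_SETUP_APPEND)
        .setPipelineSize(3)                       // hypothetical pipeline of three datanodes
        .setMinBytesRcvd(0L)
        .setMaxBytesRcvd(0L)
        .setLatestGenerationStamp(1001L)          // hypothetical generation stamp
        .setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK)
        .setAllowLazyPersist(false)
        .setPinning(false)
        .addTargetStorageIds("DS-example")        // hypothetical storage id
        .buildPartial();
  }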

  public interface OpTransferBlockProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpTransferBlockProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
     * @return Whether the header field is set.
     */
    boolean hasHeader();
    /**
     * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
     * @return The header.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader();
    /**
     * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder();

    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> 
        getTargetsList();
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index);
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
     */
    int getTargetsCount();
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
     */
    java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> 
        getTargetsOrBuilderList();
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
        int index);

    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;</code>
     * @return A list containing the targetStorageTypes.
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getTargetStorageTypesList();
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;</code>
     * @return The count of targetStorageTypes.
     */
    int getTargetStorageTypesCount();
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;</code>
     * @param index The index of the element to return.
     * @return The targetStorageTypes at the given index.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getTargetStorageTypes(int index);

    /**
     * <code>repeated string targetStorageIds = 4;</code>
     * @return A list containing the targetStorageIds.
     */
    java.util.List<java.lang.String>
        getTargetStorageIdsList();
    /**
     * <code>repeated string targetStorageIds = 4;</code>
     * @return The count of targetStorageIds.
     */
    int getTargetStorageIdsCount();
    /**
     * <code>repeated string targetStorageIds = 4;</code>
     * @param index The index of the element to return.
     * @return The targetStorageIds at the given index.
     */
    java.lang.String getTargetStorageIds(int index);
    /**
     * <code>repeated string targetStorageIds = 4;</code>
     * @param index The index of the value to return.
     * @return The bytes of the targetStorageIds at the given index.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getTargetStorageIdsBytes(int index);
  }
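
  // Illustrative sketch (hypothetical helper, not part of the generated output): the OrBuilder
  // interface above lets callers read fields from either a built OpTransferBlockProto or its
  // Builder. Only accessors declared in the interface are used here.
  private static int exampleCountTransferTargets(OpTransferBlockProtoOrBuilder op) {
    // hasHeader() and getTargetsCount() come straight from the interface declarations above.
    return op.hasHeader() ? op.getTargetsCount() : 0;
  }
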
  /**
   * Protobuf type {@code hadoop.hdfs.OpTransferBlockProto}
   */
  public static final class OpTransferBlockProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.OpTransferBlockProto)
      OpTransferBlockProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use OpTransferBlockProto.newBuilder() to construct.
    private OpTransferBlockProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private OpTransferBlockProto() {
      targets_ = java.util.Collections.emptyList();
      targetStorageTypes_ = java.util.Collections.emptyList();
      targetStorageIds_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new OpTransferBlockProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpTransferBlockProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpTransferBlockProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.Builder.class);
    }

    private int bitField0_;
    public static final int HEADER_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
    /**
     * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
     * @return Whether the header field is set.
     */
    @java.lang.Override
    public boolean hasHeader() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
     * @return The header.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
      return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
    }
    /**
     * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
      return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
    }

    public static final int TARGETS_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> targets_;
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getTargetsList() {
      return targets_;
    }
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> 
        getTargetsOrBuilderList() {
      return targets_;
    }
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
     */
    @java.lang.Override
    public int getTargetsCount() {
      return targets_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) {
      return targets_.get(index);
    }
    /**
     * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
        int index) {
      return targets_.get(index);
    }

    public static final int TARGETSTORAGETYPES_FIELD_NUMBER = 3;
    @SuppressWarnings("serial")
    private java.util.List<java.lang.Integer> targetStorageTypes_;
    private static final org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
        java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> targetStorageTypes_converter_ =
            new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
                java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>() {
              public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto convert(java.lang.Integer from) {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(from);
                return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
              }
            };
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;</code>
     * @return A list containing the targetStorageTypes.
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getTargetStorageTypesList() {
      return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
          java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(targetStorageTypes_, targetStorageTypes_converter_);
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;</code>
     * @return The count of targetStorageTypes.
     */
    @java.lang.Override
    public int getTargetStorageTypesCount() {
      return targetStorageTypes_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;</code>
     * @param index The index of the element to return.
     * @return The targetStorageTypes at the given index.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getTargetStorageTypes(int index) {
      return targetStorageTypes_converter_.convert(targetStorageTypes_.get(index));
    }

    public static final int TARGETSTORAGEIDS_FIELD_NUMBER = 4;
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList targetStorageIds_ =
        org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
    /**
     * <code>repeated string targetStorageIds = 4;</code>
     * @return A list containing the targetStorageIds.
     */
    public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
        getTargetStorageIdsList() {
      return targetStorageIds_;
    }
    /**
     * <code>repeated string targetStorageIds = 4;</code>
     * @return The count of targetStorageIds.
     */
    public int getTargetStorageIdsCount() {
      return targetStorageIds_.size();
    }
    /**
     * <code>repeated string targetStorageIds = 4;</code>
     * @param index The index of the element to return.
     * @return The targetStorageIds at the given index.
     */
    public java.lang.String getTargetStorageIds(int index) {
      return targetStorageIds_.get(index);
    }
    /**
     * <code>repeated string targetStorageIds = 4;</code>
     * @param index The index of the value to return.
     * @return The bytes of the targetStorageIds at the given index.
     */
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getTargetStorageIdsBytes(int index) {
      return targetStorageIds_.getByteString(index);
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasHeader()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getHeader().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      for (int i = 0; i < getTargetsCount(); i++) {
        if (!getTargets(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getHeader());
      }
      for (int i = 0; i < targets_.size(); i++) {
        output.writeMessage(2, targets_.get(i));
      }
      for (int i = 0; i < targetStorageTypes_.size(); i++) {
        output.writeEnum(3, targetStorageTypes_.get(i));
      }
      for (int i = 0; i < targetStorageIds_.size(); i++) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 4, targetStorageIds_.getRaw(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getHeader());
      }
      for (int i = 0; i < targets_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(2, targets_.get(i));
      }
      {
        int dataSize = 0;
        for (int i = 0; i < targetStorageTypes_.size(); i++) {
          dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeEnumSizeNoTag(targetStorageTypes_.get(i));
        }
        size += dataSize;
        size += 1 * targetStorageTypes_.size();
      }
      {
        int dataSize = 0;
        for (int i = 0; i < targetStorageIds_.size(); i++) {
          dataSize += computeStringSizeNoTag(targetStorageIds_.getRaw(i));
        }
        size += dataSize;
        size += 1 * getTargetStorageIdsList().size();
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto) obj;

      if (hasHeader() != other.hasHeader()) return false;
      if (hasHeader()) {
        if (!getHeader()
            .equals(other.getHeader())) return false;
      }
      if (!getTargetsList()
          .equals(other.getTargetsList())) return false;
      if (!targetStorageTypes_.equals(other.targetStorageTypes_)) return false;
      if (!getTargetStorageIdsList()
          .equals(other.getTargetStorageIdsList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasHeader()) {
        hash = (37 * hash) + HEADER_FIELD_NUMBER;
        hash = (53 * hash) + getHeader().hashCode();
      }
      if (getTargetsCount() > 0) {
        hash = (37 * hash) + TARGETS_FIELD_NUMBER;
        hash = (53 * hash) + getTargetsList().hashCode();
      }
      if (getTargetStorageTypesCount() > 0) {
        hash = (37 * hash) + TARGETSTORAGETYPES_FIELD_NUMBER;
        hash = (53 * hash) + targetStorageTypes_.hashCode();
      }
      if (getTargetStorageIdsCount() > 0) {
        hash = (37 * hash) + TARGETSTORAGEIDS_FIELD_NUMBER;
        hash = (53 * hash) + getTargetStorageIdsList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.OpTransferBlockProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpTransferBlockProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpTransferBlockProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpTransferBlockProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getHeaderFieldBuilder();
          getTargetsFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        header_ = null;
        if (headerBuilder_ != null) {
          headerBuilder_.dispose();
          headerBuilder_ = null;
        }
        if (targetsBuilder_ == null) {
          targets_ = java.util.Collections.emptyList();
        } else {
          targets_ = null;
          targetsBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        targetStorageTypes_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000004);
        targetStorageIds_ =
            org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpTransferBlockProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto result) {
        if (targetsBuilder_ == null) {
          if (((bitField0_ & 0x00000002) != 0)) {
            targets_ = java.util.Collections.unmodifiableList(targets_);
            bitField0_ = (bitField0_ & ~0x00000002);
          }
          result.targets_ = targets_;
        } else {
          result.targets_ = targetsBuilder_.build();
        }
        if (((bitField0_ & 0x00000004) != 0)) {
          targetStorageTypes_ = java.util.Collections.unmodifiableList(targetStorageTypes_);
          bitField0_ = (bitField0_ & ~0x00000004);
        }
        result.targetStorageTypes_ = targetStorageTypes_;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
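        // from_bitField0_ carries which fields the builder has seen; to_bitField0_
        // accumulates the presence bits that are copied into the built message.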
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.header_ = headerBuilder_ == null
              ? header_
              : headerBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          targetStorageIds_.makeImmutable();
          result.targetStorageIds_ = targetStorageIds_;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto.getDefaultInstance()) return this;
        if (other.hasHeader()) {
          mergeHeader(other.getHeader());
        }
        if (targetsBuilder_ == null) {
          if (!other.targets_.isEmpty()) {
            if (targets_.isEmpty()) {
              targets_ = other.targets_;
              bitField0_ = (bitField0_ & ~0x00000002);
            } else {
              ensureTargetsIsMutable();
              targets_.addAll(other.targets_);
            }
            onChanged();
          }
        } else {
          if (!other.targets_.isEmpty()) {
            if (targetsBuilder_.isEmpty()) {
              targetsBuilder_.dispose();
              targetsBuilder_ = null;
              targets_ = other.targets_;
              bitField0_ = (bitField0_ & ~0x00000002);
              targetsBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getTargetsFieldBuilder() : null;
            } else {
              targetsBuilder_.addAllMessages(other.targets_);
            }
          }
        }
        if (!other.targetStorageTypes_.isEmpty()) {
          if (targetStorageTypes_.isEmpty()) {
            targetStorageTypes_ = other.targetStorageTypes_;
            bitField0_ = (bitField0_ & ~0x00000004);
          } else {
            ensureTargetStorageTypesIsMutable();
            targetStorageTypes_.addAll(other.targetStorageTypes_);
          }
          onChanged();
        }
        if (!other.targetStorageIds_.isEmpty()) {
          if (targetStorageIds_.isEmpty()) {
            targetStorageIds_ = other.targetStorageIds_;
            bitField0_ |= 0x00000008;
          } else {
            ensureTargetStorageIdsIsMutable();
            targetStorageIds_.addAll(other.targetStorageIds_);
          }
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasHeader()) {
          return false;
        }
        if (!getHeader().isInitialized()) {
          return false;
        }
        for (int i = 0; i < getTargetsCount(); i++) {
          if (!getTargets(i).isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
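            // Each tag packs (field number << 3) | wire type, so the case labels
            // below are the precomputed tags for this message's fields.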
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getHeaderFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto m =
                    input.readMessage(
                        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.PARSER,
                        extensionRegistry);
                if (targetsBuilder_ == null) {
                  ensureTargetsIsMutable();
                  targets_.add(m);
                } else {
                  targetsBuilder_.addMessage(m);
                }
                break;
              } // case 18
              case 24: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(3, tmpRaw);
                } else {
                  ensureTargetStorageTypesIsMutable();
                  targetStorageTypes_.add(tmpRaw);
                }
                break;
              } // case 24
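              // Tag 26 is the packed (length-delimited) encoding of the same
              // repeated enum field 3; elements are read until the pushed
              // length limit is exhausted.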
              case 26: {
                int length = input.readRawVarint32();
                int oldLimit = input.pushLimit(length);
                while(input.getBytesUntilLimit() > 0) {
                  int tmpRaw = input.readEnum();
                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue =
                      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw);
                  if (tmpValue == null) {
                    mergeUnknownVarintField(3, tmpRaw);
                  } else {
                    ensureTargetStorageTypesIsMutable();
                    targetStorageTypes_.add(tmpRaw);
                  }
                }
                input.popLimit(oldLimit);
                break;
              } // case 26
              case 34: {
                org.apache.hadoop.thirdparty.protobuf.ByteString bs = input.readBytes();
                ensureTargetStorageIdsIsMutable();
                targetStorageIds_.add(bs);
                break;
              } // case 34
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto header_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> headerBuilder_;
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       * @return Whether the header field is set.
       */
      public boolean hasHeader() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       * @return The header.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto getHeader() {
        if (headerBuilder_ == null) {
          return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
        } else {
          return headerBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       */
      public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
        if (headerBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          header_ = value;
        } else {
          headerBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       */
      public Builder setHeader(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder builderForValue) {
        if (headerBuilder_ == null) {
          header_ = builderForValue.build();
        } else {
          headerBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       */
      public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto value) {
        if (headerBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            header_ != null &&
            header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance()) {
            getHeaderBuilder().mergeFrom(value);
          } else {
            header_ = value;
          }
        } else {
          headerBuilder_.mergeFrom(value);
        }
        if (header_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       */
      public Builder clearHeader() {
        bitField0_ = (bitField0_ & ~0x00000001);
        header_ = null;
        if (headerBuilder_ != null) {
          headerBuilder_.dispose();
          headerBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder getHeaderBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getHeaderFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder getHeaderOrBuilder() {
        if (headerBuilder_ != null) {
          return headerBuilder_.getMessageOrBuilder();
        } else {
          return header_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.getDefaultInstance() : header_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.ClientOperationHeaderProto header = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder> 
          getHeaderFieldBuilder() {
        if (headerBuilder_ == null) {
          headerBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProtoOrBuilder>(
                  getHeader(),
                  getParentForChildren(),
                  isClean());
          header_ = null;
        }
        return headerBuilder_;
      }

      private java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> targets_ =
        java.util.Collections.emptyList();
      private void ensureTargetsIsMutable() {
        if (!((bitField0_ & 0x00000002) != 0)) {
          targets_ = new java.util.ArrayList<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto>(targets_);
          bitField0_ |= 0x00000002;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> targetsBuilder_;

      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> getTargetsList() {
        if (targetsBuilder_ == null) {
          return java.util.Collections.unmodifiableList(targets_);
        } else {
          return targetsBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public int getTargetsCount() {
        if (targetsBuilder_ == null) {
          return targets_.size();
        } else {
          return targetsBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getTargets(int index) {
        if (targetsBuilder_ == null) {
          return targets_.get(index);
        } else {
          return targetsBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public Builder setTargets(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
        if (targetsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureTargetsIsMutable();
          targets_.set(index, value);
          onChanged();
        } else {
          targetsBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public Builder setTargets(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
        if (targetsBuilder_ == null) {
          ensureTargetsIsMutable();
          targets_.set(index, builderForValue.build());
          onChanged();
        } else {
          targetsBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public Builder addTargets(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
        if (targetsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureTargetsIsMutable();
          targets_.add(value);
          onChanged();
        } else {
          targetsBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public Builder addTargets(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
        if (targetsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureTargetsIsMutable();
          targets_.add(index, value);
          onChanged();
        } else {
          targetsBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public Builder addTargets(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
        if (targetsBuilder_ == null) {
          ensureTargetsIsMutable();
          targets_.add(builderForValue.build());
          onChanged();
        } else {
          targetsBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public Builder addTargets(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
        if (targetsBuilder_ == null) {
          ensureTargetsIsMutable();
          targets_.add(index, builderForValue.build());
          onChanged();
        } else {
          targetsBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public Builder addAllTargets(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto> values) {
        if (targetsBuilder_ == null) {
          ensureTargetsIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, targets_);
          onChanged();
        } else {
          targetsBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public Builder clearTargets() {
        if (targetsBuilder_ == null) {
          targets_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000002);
          onChanged();
        } else {
          targetsBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public Builder removeTargets(int index) {
        if (targetsBuilder_ == null) {
          ensureTargetsIsMutable();
          targets_.remove(index);
          onChanged();
        } else {
          targetsBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getTargetsBuilder(
          int index) {
        return getTargetsFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getTargetsOrBuilder(
          int index) {
        if (targetsBuilder_ == null) {
          return targets_.get(index);
        } else {
          return targetsBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public java.util.List<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> 
           getTargetsOrBuilderList() {
        if (targetsBuilder_ != null) {
          return targetsBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(targets_);
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder() {
        return getTargetsFieldBuilder().addBuilder(
            org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder addTargetsBuilder(
          int index) {
        return getTargetsFieldBuilder().addBuilder(
            index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance());
      }
      /**
       * <code>repeated .hadoop.hdfs.DatanodeInfoProto targets = 2;</code>
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder> 
           getTargetsBuilderList() {
        return getTargetsFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> 
          getTargetsFieldBuilder() {
        if (targetsBuilder_ == null) {
          targetsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
                  targets_,
                  ((bitField0_ & 0x00000002) != 0),
                  getParentForChildren(),
                  isClean());
          targets_ = null;
        }
        return targetsBuilder_;
      }

      private java.util.List<java.lang.Integer> targetStorageTypes_ =
        java.util.Collections.emptyList();
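      // Repeated enum values are stored as raw protobuf numbers and converted to
      // StorageTypeProto lazily via targetStorageTypes_converter_ on read.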
      private void ensureTargetStorageTypesIsMutable() {
        if (!((bitField0_ & 0x00000004) != 0)) {
          targetStorageTypes_ = new java.util.ArrayList<java.lang.Integer>(targetStorageTypes_);
          bitField0_ |= 0x00000004;
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;</code>
       * @return A list containing the targetStorageTypes.
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> getTargetStorageTypesList() {
        return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
            java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto>(targetStorageTypes_, targetStorageTypes_converter_);
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;</code>
       * @return The count of targetStorageTypes.
       */
      public int getTargetStorageTypesCount() {
        return targetStorageTypes_.size();
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;</code>
       * @param index The index of the element to return.
       * @return The targetStorageTypes at the given index.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getTargetStorageTypes(int index) {
        return targetStorageTypes_converter_.convert(targetStorageTypes_.get(index));
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;</code>
       * @param index The index to set the value at.
       * @param value The targetStorageTypes to set.
       * @return This builder for chaining.
       */
      public Builder setTargetStorageTypes(
          int index, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureTargetStorageTypesIsMutable();
        targetStorageTypes_.set(index, value.getNumber());
        onChanged();
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;</code>
       * @param value The targetStorageTypes to add.
       * @return This builder for chaining.
       */
      public Builder addTargetStorageTypes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureTargetStorageTypesIsMutable();
        targetStorageTypes_.add(value.getNumber());
        onChanged();
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;</code>
       * @param values The targetStorageTypes to add.
       * @return This builder for chaining.
       */
      public Builder addAllTargetStorageTypes(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto> values) {
        ensureTargetStorageTypesIsMutable();
        for (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value : values) {
          targetStorageTypes_.add(value.getNumber());
        }
        onChanged();
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.StorageTypeProto targetStorageTypes = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearTargetStorageTypes() {
        targetStorageTypes_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000004);
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList targetStorageIds_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
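      // LazyStringArrayList keeps each element as either a String or the raw
      // ByteString from the wire, deferring UTF-8 decoding until it is needed.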
      private void ensureTargetStorageIdsIsMutable() {
        if (!targetStorageIds_.isModifiable()) {
          targetStorageIds_ = new org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList(targetStorageIds_);
        }
        bitField0_ |= 0x00000008;
      }
      /**
       * <code>repeated string targetStorageIds = 4;</code>
       * @return A list containing the targetStorageIds.
       */
      public org.apache.hadoop.thirdparty.protobuf.ProtocolStringList
          getTargetStorageIdsList() {
        targetStorageIds_.makeImmutable();
        return targetStorageIds_;
      }
      /**
       * <code>repeated string targetStorageIds = 4;</code>
       * @return The count of targetStorageIds.
       */
      public int getTargetStorageIdsCount() {
        return targetStorageIds_.size();
      }
      /**
       * <code>repeated string targetStorageIds = 4;</code>
       * @param index The index of the element to return.
       * @return The targetStorageIds at the given index.
       */
      public java.lang.String getTargetStorageIds(int index) {
        return targetStorageIds_.get(index);
      }
      /**
       * <code>repeated string targetStorageIds = 4;</code>
       * @param index The index of the value to return.
       * @return The bytes of the targetStorageIds at the given index.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getTargetStorageIdsBytes(int index) {
        return targetStorageIds_.getByteString(index);
      }
      /**
       * <code>repeated string targetStorageIds = 4;</code>
       * @param index The index to set the value at.
       * @param value The targetStorageIds to set.
       * @return This builder for chaining.
       */
      public Builder setTargetStorageIds(
          int index, java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureTargetStorageIdsIsMutable();
        targetStorageIds_.set(index, value);
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string targetStorageIds = 4;</code>
       * @param value The targetStorageIds to add.
       * @return This builder for chaining.
       */
      public Builder addTargetStorageIds(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        ensureTargetStorageIdsIsMutable();
        targetStorageIds_.add(value);
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string targetStorageIds = 4;</code>
       * @param values The targetStorageIds to add.
       * @return This builder for chaining.
       */
      public Builder addAllTargetStorageIds(
          java.lang.Iterable<java.lang.String> values) {
        ensureTargetStorageIdsIsMutable();
        org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
            values, targetStorageIds_);
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>repeated string targetStorageIds = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearTargetStorageIds() {
        targetStorageIds_ =
          org.apache.hadoop.thirdparty.protobuf.LazyStringArrayList.emptyList();
        bitField0_ = (bitField0_ & ~0x00000008);
        onChanged();
        return this;
      }
      /**
       * <code>repeated string targetStorageIds = 4;</code>
       * @param value The bytes of the targetStorageIds to add.
       * @return This builder for chaining.
       */
      public Builder addTargetStorageIdsBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        ensureTargetStorageIdsIsMutable();
        targetStorageIds_.add(value);
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpTransferBlockProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.OpTransferBlockProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpTransferBlockProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpTransferBlockProto>() {
      @java.lang.Override
      public OpTransferBlockProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<OpTransferBlockProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<OpTransferBlockProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
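
  // A minimal usage sketch (illustrative only, not emitted by protoc): how a caller
  // might build, serialize, and re-parse an OpTransferBlockProto. The variables
  // `header`, `target`, `out`, and `in` are assumed to be a fully initialized
  // ClientOperationHeaderProto, a DatanodeInfoProto, an OutputStream, and an
  // InputStream obtained elsewhere; the storage id string is hypothetical.
  //
  //   OpTransferBlockProto op = OpTransferBlockProto.newBuilder()
  //       .setHeader(header)                                   // required field
  //       .addTargets(target)                                  // repeated message
  //       .addTargetStorageTypes(HdfsProtos.StorageTypeProto.DISK)
  //       .addTargetStorageIds("storage-id-1")                 // hypothetical id
  //       .build();
  //   op.writeDelimitedTo(out);
  //   OpTransferBlockProto roundTripped = OpTransferBlockProto.parseDelimitedFrom(in);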

  public interface OpReplaceBlockProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpReplaceBlockProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     * @return Whether the header field is set.
     */
    boolean hasHeader();
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     * @return The header.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader();
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder();

    /**
     * <code>required string delHint = 2;</code>
     * @return Whether the delHint field is set.
     */
    boolean hasDelHint();
    /**
     * <code>required string delHint = 2;</code>
     * @return The delHint.
     */
    java.lang.String getDelHint();
    /**
     * <code>required string delHint = 2;</code>
     * @return The bytes for delHint.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getDelHintBytes();

    /**
     * <code>required .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
     * @return Whether the source field is set.
     */
    boolean hasSource();
    /**
     * <code>required .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
     * @return The source.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource();
    /**
     * <code>required .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder();

    /**
     * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];</code>
     * @return Whether the storageType field is set.
     */
    boolean hasStorageType();
    /**
     * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];</code>
     * @return The storageType.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType();

    /**
     * <code>optional string storageId = 5;</code>
     * @return Whether the storageId field is set.
     */
    boolean hasStorageId();
    /**
     * <code>optional string storageId = 5;</code>
     * @return The storageId.
     */
    java.lang.String getStorageId();
    /**
     * <code>optional string storageId = 5;</code>
     * @return The bytes for storageId.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getStorageIdBytes();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.OpReplaceBlockProto}
   */
  public static final class OpReplaceBlockProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.OpReplaceBlockProto)
      OpReplaceBlockProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use OpReplaceBlockProto.newBuilder() to construct.
    private OpReplaceBlockProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private OpReplaceBlockProto() {
      delHint_ = "";
      storageType_ = 1;
      storageId_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new OpReplaceBlockProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReplaceBlockProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReplaceBlockProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.Builder.class);
    }

    private int bitField0_;
    public static final int HEADER_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     * @return Whether the header field is set.
     */
    @java.lang.Override
    public boolean hasHeader() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     * @return The header.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
      return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
    }
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
      return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
    }

    public static final int DELHINT_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object delHint_ = "";
    /**
     * <code>required string delHint = 2;</code>
     * @return Whether the delHint field is set.
     */
    @java.lang.Override
    public boolean hasDelHint() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required string delHint = 2;</code>
     * @return The delHint.
     */
    @java.lang.Override
    public java.lang.String getDelHint() {
      java.lang.Object ref = delHint_;
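      // ref is either an already-decoded String or the raw ByteString from the
      // wire; decode lazily and cache the String only if it is valid UTF-8.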
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          delHint_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string delHint = 2;</code>
     * @return The bytes for delHint.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getDelHintBytes() {
      java.lang.Object ref = delHint_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        delHint_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int SOURCE_FIELD_NUMBER = 3;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_;
    /**
     * <code>required .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
     * @return Whether the source field is set.
     */
    @java.lang.Override
    public boolean hasSource() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
     * @return The source.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() {
      return source_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance() : source_;
    }
    /**
     * <code>required .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() {
      return source_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance() : source_;
    }

    public static final int STORAGETYPE_FIELD_NUMBER = 4;
    private int storageType_ = 1;
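    // 1 is the wire number of StorageTypeProto.DISK, the declared proto default.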
    /**
     * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];</code>
     * @return Whether the storageType field is set.
     */
    @java.lang.Override public boolean hasStorageType() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];</code>
     * @return The storageType.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(storageType_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
    }

    public static final int STORAGEID_FIELD_NUMBER = 5;
    @SuppressWarnings("serial")
    private volatile java.lang.Object storageId_ = "";
    /**
     * <code>optional string storageId = 5;</code>
     * @return Whether the storageId field is set.
     */
    @java.lang.Override
    public boolean hasStorageId() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>optional string storageId = 5;</code>
     * @return The storageId.
     */
    @java.lang.Override
    public java.lang.String getStorageId() {
      java.lang.Object ref = storageId_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          storageId_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string storageId = 5;</code>
     * @return The bytes for storageId.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getStorageIdBytes() {
      java.lang.Object ref = storageId_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        storageId_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasHeader()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasDelHint()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSource()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getHeader().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getSource().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getHeader());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, delHint_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeMessage(3, getSource());
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeEnum(4, storageType_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 5, storageId_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getHeader());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, delHint_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(3, getSource());
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(4, storageType_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(5, storageId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto) obj;

      if (hasHeader() != other.hasHeader()) return false;
      if (hasHeader()) {
        if (!getHeader()
            .equals(other.getHeader())) return false;
      }
      if (hasDelHint() != other.hasDelHint()) return false;
      if (hasDelHint()) {
        if (!getDelHint()
            .equals(other.getDelHint())) return false;
      }
      if (hasSource() != other.hasSource()) return false;
      if (hasSource()) {
        if (!getSource()
            .equals(other.getSource())) return false;
      }
      if (hasStorageType() != other.hasStorageType()) return false;
      if (hasStorageType()) {
        if (storageType_ != other.storageType_) return false;
      }
      if (hasStorageId() != other.hasStorageId()) return false;
      if (hasStorageId()) {
        if (!getStorageId()
            .equals(other.getStorageId())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasHeader()) {
        hash = (37 * hash) + HEADER_FIELD_NUMBER;
        hash = (53 * hash) + getHeader().hashCode();
      }
      if (hasDelHint()) {
        hash = (37 * hash) + DELHINT_FIELD_NUMBER;
        hash = (53 * hash) + getDelHint().hashCode();
      }
      if (hasSource()) {
        hash = (37 * hash) + SOURCE_FIELD_NUMBER;
        hash = (53 * hash) + getSource().hashCode();
      }
      if (hasStorageType()) {
        hash = (37 * hash) + STORAGETYPE_FIELD_NUMBER;
        hash = (53 * hash) + storageType_;
      }
      if (hasStorageId()) {
        hash = (37 * hash) + STORAGEID_FIELD_NUMBER;
        hash = (53 * hash) + getStorageId().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.OpReplaceBlockProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpReplaceBlockProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReplaceBlockProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReplaceBlockProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getHeaderFieldBuilder();
          getSourceFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        header_ = null;
        if (headerBuilder_ != null) {
          headerBuilder_.dispose();
          headerBuilder_ = null;
        }
        delHint_ = "";
        source_ = null;
        if (sourceBuilder_ != null) {
          sourceBuilder_.dispose();
          sourceBuilder_ = null;
        }
        storageType_ = 1;
        storageId_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpReplaceBlockProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.header_ = headerBuilder_ == null
              ? header_
              : headerBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.delHint_ = delHint_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.source_ = sourceBuilder_ == null
              ? source_
              : sourceBuilder_.build();
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.storageType_ = storageType_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.storageId_ = storageId_;
          to_bitField0_ |= 0x00000010;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto.getDefaultInstance()) return this;
        if (other.hasHeader()) {
          mergeHeader(other.getHeader());
        }
        if (other.hasDelHint()) {
          delHint_ = other.delHint_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        if (other.hasSource()) {
          mergeSource(other.getSource());
        }
        if (other.hasStorageType()) {
          setStorageType(other.getStorageType());
        }
        if (other.hasStorageId()) {
          storageId_ = other.storageId_;
          bitField0_ |= 0x00000010;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasHeader()) {
          return false;
        }
        if (!hasDelHint()) {
          return false;
        }
        if (!hasSource()) {
          return false;
        }
        if (!getHeader().isInitialized()) {
          return false;
        }
        if (!getSource().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
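            // A protobuf tag packs (field_number << 3) | wire_type, so the cases
            // below correspond to: 10 -> header (field 1, length-delimited),
            // 18 -> delHint (field 2), 26 -> source (field 3),
            // 32 -> storageType (field 4, varint), 42 -> storageId (field 5).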
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getHeaderFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                delHint_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 26: {
                input.readMessage(
                    getSourceFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              case 32: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(4, tmpRaw);
                } else {
                  storageType_ = tmpRaw;
                  bitField0_ |= 0x00000008;
                }
                break;
              } // case 32
              case 42: {
                storageId_ = input.readBytes();
                bitField0_ |= 0x00000010;
                break;
              } // case 42
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_;
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       * @return Whether the header field is set.
       */
      public boolean hasHeader() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       * @return The header.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
        if (headerBuilder_ == null) {
          return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
        } else {
          return headerBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
        if (headerBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          header_ = value;
        } else {
          headerBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public Builder setHeader(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
        if (headerBuilder_ == null) {
          header_ = builderForValue.build();
        } else {
          headerBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
        if (headerBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            header_ != null &&
            header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
            getHeaderBuilder().mergeFrom(value);
          } else {
            header_ = value;
          }
        } else {
          headerBuilder_.mergeFrom(value);
        }
        if (header_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public Builder clearHeader() {
        bitField0_ = (bitField0_ & ~0x00000001);
        header_ = null;
        if (headerBuilder_ != null) {
          headerBuilder_.dispose();
          headerBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getHeaderFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
        if (headerBuilder_ != null) {
          return headerBuilder_.getMessageOrBuilder();
        } else {
          return header_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> 
          getHeaderFieldBuilder() {
        if (headerBuilder_ == null) {
          headerBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
                  getHeader(),
                  getParentForChildren(),
                  isClean());
          header_ = null;
        }
        return headerBuilder_;
      }

      private java.lang.Object delHint_ = "";
      /**
       * <code>required string delHint = 2;</code>
       * @return Whether the delHint field is set.
       */
      public boolean hasDelHint() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required string delHint = 2;</code>
       * @return The delHint.
       */
      public java.lang.String getDelHint() {
        java.lang.Object ref = delHint_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            delHint_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string delHint = 2;</code>
       * @return The bytes for delHint.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getDelHintBytes() {
        java.lang.Object ref = delHint_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          delHint_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string delHint = 2;</code>
       * @param value The delHint to set.
       * @return This builder for chaining.
       */
      public Builder setDelHint(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        delHint_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required string delHint = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearDelHint() {
        delHint_ = getDefaultInstance().getDelHint();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <code>required string delHint = 2;</code>
       * @param value The bytes for delHint to set.
       * @return This builder for chaining.
       */
      public Builder setDelHintBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        delHint_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> sourceBuilder_;
      /**
       * <code>required .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
       * @return Whether the source field is set.
       */
      public boolean hasSource() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
       * @return The source.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto getSource() {
        if (sourceBuilder_ == null) {
          return source_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance() : source_;
        } else {
          return sourceBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
       */
      public Builder setSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
        if (sourceBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          source_ = value;
        } else {
          sourceBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
       */
      public Builder setSource(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder builderForValue) {
        if (sourceBuilder_ == null) {
          source_ = builderForValue.build();
        } else {
          sourceBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
       */
      public Builder mergeSource(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto value) {
        if (sourceBuilder_ == null) {
          if (((bitField0_ & 0x00000004) != 0) &&
            source_ != null &&
            source_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance()) {
            getSourceBuilder().mergeFrom(value);
          } else {
            source_ = value;
          }
        } else {
          sourceBuilder_.mergeFrom(value);
        }
        if (source_ != null) {
          bitField0_ |= 0x00000004;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
       */
      public Builder clearSource() {
        bitField0_ = (bitField0_ & ~0x00000004);
        source_ = null;
        if (sourceBuilder_ != null) {
          sourceBuilder_.dispose();
          sourceBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder getSourceBuilder() {
        bitField0_ |= 0x00000004;
        onChanged();
        return getSourceFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder getSourceOrBuilder() {
        if (sourceBuilder_ != null) {
          return sourceBuilder_.getMessageOrBuilder();
        } else {
          return source_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.getDefaultInstance() : source_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeInfoProto source = 3;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder> 
          getSourceFieldBuilder() {
        if (sourceBuilder_ == null) {
          sourceBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProtoOrBuilder>(
                  getSource(),
                  getParentForChildren(),
                  isClean());
          source_ = null;
        }
        return sourceBuilder_;
      }

      private int storageType_ = 1;
      /**
       * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];</code>
       * @return Whether the storageType field is set.
       */
      @java.lang.Override public boolean hasStorageType() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];</code>
       * @return The storageType.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto getStorageType() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.forNumber(storageType_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK : result;
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];</code>
       * @param value The storageType to set.
       * @return This builder for chaining.
       */
      public Builder setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000008;
        storageType_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.StorageTypeProto storageType = 4 [default = DISK];</code>
       * @return This builder for chaining.
       */
      public Builder clearStorageType() {
        bitField0_ = (bitField0_ & ~0x00000008);
        storageType_ = 1;
        onChanged();
        return this;
      }

      private java.lang.Object storageId_ = "";
      /**
       * <code>optional string storageId = 5;</code>
       * @return Whether the storageId field is set.
       */
      public boolean hasStorageId() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional string storageId = 5;</code>
       * @return The storageId.
       */
      public java.lang.String getStorageId() {
        java.lang.Object ref = storageId_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            storageId_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string storageId = 5;</code>
       * @return The bytes for storageId.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getStorageIdBytes() {
        java.lang.Object ref = storageId_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          storageId_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string storageId = 5;</code>
       * @param value The storageId to set.
       * @return This builder for chaining.
       */
      public Builder setStorageId(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        storageId_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional string storageId = 5;</code>
       * @return This builder for chaining.
       */
      public Builder clearStorageId() {
        storageId_ = getDefaultInstance().getStorageId();
        bitField0_ = (bitField0_ & ~0x00000010);
        onChanged();
        return this;
      }
      /**
       * <code>optional string storageId = 5;</code>
       * @param value The bytes for storageId to set.
       * @return This builder for chaining.
       */
      public Builder setStorageIdBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        storageId_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpReplaceBlockProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.OpReplaceBlockProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpReplaceBlockProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpReplaceBlockProto>() {
      @java.lang.Override
      public OpReplaceBlockProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<OpReplaceBlockProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<OpReplaceBlockProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
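
  /**
   * Illustrative sketch only (not emitted by protoc): shows how a caller might
   * build, serialize, and re-parse an {@code OpReplaceBlockProto}. The
   * {@code header} and {@code source} arguments and the delHint value are
   * placeholders assumed to be supplied by the caller.
   */
  private static OpReplaceBlockProto exampleReplaceBlockRoundTrip(
      BaseHeaderProto header,
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto source)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    // header, delHint and source are required fields; storageType and storageId are optional.
    OpReplaceBlockProto request = OpReplaceBlockProto.newBuilder()
        .setHeader(header)
        .setDelHint("example-del-hint")
        .setSource(source)
        .setStorageType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto.DISK)
        .build();
    byte[] wire = request.toByteArray();        // serialize to the wire format
    return OpReplaceBlockProto.parseFrom(wire); // parse it back
  }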

  public interface OpCopyBlockProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpCopyBlockProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     * @return Whether the header field is set.
     */
    boolean hasHeader();
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     * @return The header.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader();
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.OpCopyBlockProto}
   */
  public static final class OpCopyBlockProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.OpCopyBlockProto)
      OpCopyBlockProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use OpCopyBlockProto.newBuilder() to construct.
    private OpCopyBlockProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private OpCopyBlockProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new OpCopyBlockProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCopyBlockProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCopyBlockProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.Builder.class);
    }

    private int bitField0_;
    public static final int HEADER_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     * @return Whether the header field is set.
     */
    @java.lang.Override
    public boolean hasHeader() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     * @return The header.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
      return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
    }
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
      return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasHeader()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getHeader().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getHeader());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getHeader());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto) obj;

      if (hasHeader() != other.hasHeader()) return false;
      if (hasHeader()) {
        if (!getHeader()
            .equals(other.getHeader())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasHeader()) {
        hash = (37 * hash) + HEADER_FIELD_NUMBER;
        hash = (53 * hash) + getHeader().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.OpCopyBlockProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpCopyBlockProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCopyBlockProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCopyBlockProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getHeaderFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        header_ = null;
        if (headerBuilder_ != null) {
          headerBuilder_.dispose();
          headerBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCopyBlockProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.header_ = headerBuilder_ == null
              ? header_
              : headerBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto.getDefaultInstance()) return this;
        if (other.hasHeader()) {
          mergeHeader(other.getHeader());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasHeader()) {
          return false;
        }
        if (!getHeader().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getHeaderFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_;
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       * @return Whether the header field is set.
       */
      public boolean hasHeader() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       * @return The header.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
        if (headerBuilder_ == null) {
          return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
        } else {
          return headerBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
        if (headerBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          header_ = value;
        } else {
          headerBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public Builder setHeader(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
        if (headerBuilder_ == null) {
          header_ = builderForValue.build();
        } else {
          headerBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
        if (headerBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            header_ != null &&
            header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
            getHeaderBuilder().mergeFrom(value);
          } else {
            header_ = value;
          }
        } else {
          headerBuilder_.mergeFrom(value);
        }
        if (header_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public Builder clearHeader() {
        bitField0_ = (bitField0_ & ~0x00000001);
        header_ = null;
        if (headerBuilder_ != null) {
          headerBuilder_.dispose();
          headerBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getHeaderFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
        if (headerBuilder_ != null) {
          return headerBuilder_.getMessageOrBuilder();
        } else {
          return header_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> 
          getHeaderFieldBuilder() {
        if (headerBuilder_ == null) {
          headerBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
                  getHeader(),
                  getParentForChildren(),
                  isClean());
          header_ = null;
        }
        return headerBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpCopyBlockProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.OpCopyBlockProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpCopyBlockProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpCopyBlockProto>() {
      @java.lang.Override
      public OpCopyBlockProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<OpCopyBlockProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<OpCopyBlockProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
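  // Illustrative sketch only (not emitted by protoc): a minimal round-trip of the
  // OpCopyBlockProto builder and parser generated above. The header argument is
  // assumed to be a fully populated BaseHeaderProto supplied by the caller; build()
  // throws via newUninitializedMessageException if the required header is missing.
  private static OpCopyBlockProto copyBlockExample(BaseHeaderProto someHeader)
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    OpCopyBlockProto op = OpCopyBlockProto.newBuilder()
        .setHeader(someHeader)                 // required field 1
        .build();
    byte[] wire = op.toByteArray();            // serialize to the wire format
    return OpCopyBlockProto.parseFrom(wire);   // parse back through the generated PARSER
  }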

  public interface OpBlockChecksumProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpBlockChecksumProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     * @return Whether the header field is set.
     */
    boolean hasHeader();
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     * @return The header.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader();
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder();

    /**
     * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;</code>
     * @return Whether the blockChecksumOptions field is set.
     */
    boolean hasBlockChecksumOptions();
    /**
     * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;</code>
     * @return The blockChecksumOptions.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions();
    /**
     * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.OpBlockChecksumProto}
   */
  public static final class OpBlockChecksumProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.OpBlockChecksumProto)
      OpBlockChecksumProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use OpBlockChecksumProto.newBuilder() to construct.
    private OpBlockChecksumProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private OpBlockChecksumProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new OpBlockChecksumProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.Builder.class);
    }

    private int bitField0_;
    public static final int HEADER_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     * @return Whether the header field is set.
     */
    @java.lang.Override
    public boolean hasHeader() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     * @return The header.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
      return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
    }
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
      return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
    }

    public static final int BLOCKCHECKSUMOPTIONS_FIELD_NUMBER = 2;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto blockChecksumOptions_;
    /**
     * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;</code>
     * @return Whether the blockChecksumOptions field is set.
     */
    @java.lang.Override
    public boolean hasBlockChecksumOptions() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;</code>
     * @return The blockChecksumOptions.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions() {
      return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
    }
    /**
     * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder() {
      return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasHeader()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getHeader().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getHeader());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeMessage(2, getBlockChecksumOptions());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getHeader());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(2, getBlockChecksumOptions());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto) obj;

      if (hasHeader() != other.hasHeader()) return false;
      if (hasHeader()) {
        if (!getHeader()
            .equals(other.getHeader())) return false;
      }
      if (hasBlockChecksumOptions() != other.hasBlockChecksumOptions()) return false;
      if (hasBlockChecksumOptions()) {
        if (!getBlockChecksumOptions()
            .equals(other.getBlockChecksumOptions())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasHeader()) {
        hash = (37 * hash) + HEADER_FIELD_NUMBER;
        hash = (53 * hash) + getHeader().hashCode();
      }
      if (hasBlockChecksumOptions()) {
        hash = (37 * hash) + BLOCKCHECKSUMOPTIONS_FIELD_NUMBER;
        hash = (53 * hash) + getBlockChecksumOptions().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.OpBlockChecksumProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpBlockChecksumProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getHeaderFieldBuilder();
          getBlockChecksumOptionsFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        header_ = null;
        if (headerBuilder_ != null) {
          headerBuilder_.dispose();
          headerBuilder_ = null;
        }
        blockChecksumOptions_ = null;
        if (blockChecksumOptionsBuilder_ != null) {
          blockChecksumOptionsBuilder_.dispose();
          blockChecksumOptionsBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.header_ = headerBuilder_ == null
              ? header_
              : headerBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.blockChecksumOptions_ = blockChecksumOptionsBuilder_ == null
              ? blockChecksumOptions_
              : blockChecksumOptionsBuilder_.build();
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto.getDefaultInstance()) return this;
        if (other.hasHeader()) {
          mergeHeader(other.getHeader());
        }
        if (other.hasBlockChecksumOptions()) {
          mergeBlockChecksumOptions(other.getBlockChecksumOptions());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasHeader()) {
          return false;
        }
        if (!getHeader().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getHeaderFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                input.readMessage(
                    getBlockChecksumOptionsFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_;
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       * @return Whether the header field is set.
       */
      public boolean hasHeader() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       * @return The header.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
        if (headerBuilder_ == null) {
          return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
        } else {
          return headerBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
        if (headerBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          header_ = value;
        } else {
          headerBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public Builder setHeader(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
        if (headerBuilder_ == null) {
          header_ = builderForValue.build();
        } else {
          headerBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
        if (headerBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            header_ != null &&
            header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
            getHeaderBuilder().mergeFrom(value);
          } else {
            header_ = value;
          }
        } else {
          headerBuilder_.mergeFrom(value);
        }
        if (header_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public Builder clearHeader() {
        bitField0_ = (bitField0_ & ~0x00000001);
        header_ = null;
        if (headerBuilder_ != null) {
          headerBuilder_.dispose();
          headerBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getHeaderFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
        if (headerBuilder_ != null) {
          return headerBuilder_.getMessageOrBuilder();
        } else {
          return header_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> 
          getHeaderFieldBuilder() {
        if (headerBuilder_ == null) {
          headerBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
                  getHeader(),
                  getParentForChildren(),
                  isClean());
          header_ = null;
        }
        return headerBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto blockChecksumOptions_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder> blockChecksumOptionsBuilder_;
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;</code>
       * @return Whether the blockChecksumOptions field is set.
       */
      public boolean hasBlockChecksumOptions() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;</code>
       * @return The blockChecksumOptions.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions() {
        if (blockChecksumOptionsBuilder_ == null) {
          return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
        } else {
          return blockChecksumOptionsBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;</code>
       */
      public Builder setBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
        if (blockChecksumOptionsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          blockChecksumOptions_ = value;
        } else {
          blockChecksumOptionsBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;</code>
       */
      public Builder setBlockChecksumOptions(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder builderForValue) {
        if (blockChecksumOptionsBuilder_ == null) {
          blockChecksumOptions_ = builderForValue.build();
        } else {
          blockChecksumOptionsBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;</code>
       */
      public Builder mergeBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
        if (blockChecksumOptionsBuilder_ == null) {
          if (((bitField0_ & 0x00000002) != 0) &&
            blockChecksumOptions_ != null &&
            blockChecksumOptions_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance()) {
            getBlockChecksumOptionsBuilder().mergeFrom(value);
          } else {
            blockChecksumOptions_ = value;
          }
        } else {
          blockChecksumOptionsBuilder_.mergeFrom(value);
        }
        if (blockChecksumOptions_ != null) {
          bitField0_ |= 0x00000002;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;</code>
       */
      public Builder clearBlockChecksumOptions() {
        bitField0_ = (bitField0_ & ~0x00000002);
        blockChecksumOptions_ = null;
        if (blockChecksumOptionsBuilder_ != null) {
          blockChecksumOptionsBuilder_.dispose();
          blockChecksumOptionsBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder getBlockChecksumOptionsBuilder() {
        bitField0_ |= 0x00000002;
        onChanged();
        return getBlockChecksumOptionsFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder() {
        if (blockChecksumOptionsBuilder_ != null) {
          return blockChecksumOptionsBuilder_.getMessageOrBuilder();
        } else {
          return blockChecksumOptions_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 2;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder> 
          getBlockChecksumOptionsFieldBuilder() {
        if (blockChecksumOptionsBuilder_ == null) {
          blockChecksumOptionsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder>(
                  getBlockChecksumOptions(),
                  getParentForChildren(),
                  isClean());
          blockChecksumOptions_ = null;
        }
        return blockChecksumOptionsBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpBlockChecksumProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.OpBlockChecksumProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockChecksumProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpBlockChecksumProto>() {
      @java.lang.Override
      public OpBlockChecksumProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockChecksumProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockChecksumProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
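  // Illustrative sketch only (not emitted by protoc): building an OpBlockChecksumProto.
  // header is required (field 1); blockChecksumOptions is optional (field 2) and is only
  // serialized when explicitly set. Both arguments are assumed to be supplied, fully
  // populated, by the caller.
  private static OpBlockChecksumProto blockChecksumExample(
      BaseHeaderProto someHeader,
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto options) {
    OpBlockChecksumProto.Builder b = OpBlockChecksumProto.newBuilder()
        .setHeader(someHeader);                // required field 1
    if (options != null) {
      b.setBlockChecksumOptions(options);      // optional field 2
    }
    OpBlockChecksumProto msg = b.build();
    assert msg.hasHeader();
    return msg;
  }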

  public interface OpBlockGroupChecksumProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpBlockGroupChecksumProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     * @return Whether the header field is set.
     */
    boolean hasHeader();
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     * @return The header.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader();
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder();

    /**
     * <code>required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;</code>
     * @return Whether the datanodes field is set.
     */
    boolean hasDatanodes();
    /**
     * <code>required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;</code>
     * @return The datanodes.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDatanodes();
    /**
     * <code>required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder getDatanodesOrBuilder();

    /**
     * <pre>
     * each internal block has a block token
     * </pre>
     *
     * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
     */
    java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> 
        getBlockTokensList();
    /**
     * <pre>
     * each internal block has a block token
     * </pre>
     *
     * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
     */
    org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockTokens(int index);
    /**
     * <pre>
     * each internal block has a block token
     * </pre>
     *
     * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
     */
    int getBlockTokensCount();
    /**
     * <pre>
     * each internal block has a block token
     * </pre>
     *
     * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
     */
    java.util.List<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> 
        getBlockTokensOrBuilderList();
    /**
     * <pre>
     * each internal block has a block token
     * </pre>
     *
     * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
     */
    org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokensOrBuilder(
        int index);

    /**
     * <code>required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;</code>
     * @return Whether the ecPolicy field is set.
     */
    boolean hasEcPolicy();
    /**
     * <code>required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;</code>
     * @return The ecPolicy.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy();
    /**
     * <code>required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder();

    /**
     * <code>repeated uint32 blockIndices = 5;</code>
     * @return A list containing the blockIndices.
     */
    java.util.List<java.lang.Integer> getBlockIndicesList();
    /**
     * <code>repeated uint32 blockIndices = 5;</code>
     * @return The count of blockIndices.
     */
    int getBlockIndicesCount();
    /**
     * <code>repeated uint32 blockIndices = 5;</code>
     * @param index The index of the element to return.
     * @return The blockIndices at the given index.
     */
    int getBlockIndices(int index);

    /**
     * <code>required uint64 requestedNumBytes = 6;</code>
     * @return Whether the requestedNumBytes field is set.
     */
    boolean hasRequestedNumBytes();
    /**
     * <code>required uint64 requestedNumBytes = 6;</code>
     * @return The requestedNumBytes.
     */
    long getRequestedNumBytes();

    /**
     * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;</code>
     * @return Whether the blockChecksumOptions field is set.
     */
    boolean hasBlockChecksumOptions();
    /**
     * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;</code>
     * @return The blockChecksumOptions.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions();
    /**
     * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder();
  }
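  // Illustrative sketch only (not emitted by protoc): read-side use of the
  // OpBlockGroupChecksumProtoOrBuilder accessors declared above. It assumes, per the
  // field comment, that blockTokens and blockIndices are parallel lists (one token per
  // internal block), but it only calls accessors the interface actually declares.
  private static String describeBlockGroupChecksumRequest(OpBlockGroupChecksumProtoOrBuilder req) {
    StringBuilder sb = new StringBuilder();
    sb.append("requestedNumBytes=").append(req.getRequestedNumBytes());   // required field 6
    sb.append(", internalBlocks=").append(req.getBlockIndicesCount());    // repeated field 5
    sb.append(", blockTokens=").append(req.getBlockTokensCount());        // repeated field 3
    if (req.hasBlockChecksumOptions()) {                                  // optional field 7
      sb.append(", options=").append(req.getBlockChecksumOptions());
    }
    return sb.toString();
  }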
  /**
   * Protobuf type {@code hadoop.hdfs.OpBlockGroupChecksumProto}
   */
  public static final class OpBlockGroupChecksumProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.OpBlockGroupChecksumProto)
      OpBlockGroupChecksumProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use OpBlockGroupChecksumProto.newBuilder() to construct.
    private OpBlockGroupChecksumProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private OpBlockGroupChecksumProto() {
      blockTokens_ = java.util.Collections.emptyList();
      blockIndices_ = emptyIntList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new OpBlockGroupChecksumProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto.Builder.class);
    }

    private int bitField0_;
    public static final int HEADER_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     * @return Whether the header field is set.
     */
    @java.lang.Override
    public boolean hasHeader() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     * @return The header.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
      return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
    }
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
      return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
    }

    public static final int DATANODES_FIELD_NUMBER = 2;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto datanodes_;
    /**
     * <code>required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;</code>
     * @return Whether the datanodes field is set.
     */
    @java.lang.Override
    public boolean hasDatanodes() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;</code>
     * @return The datanodes.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDatanodes() {
      return datanodes_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance() : datanodes_;
    }
    /**
     * <code>required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder getDatanodesOrBuilder() {
      return datanodes_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance() : datanodes_;
    }

    public static final int BLOCKTOKENS_FIELD_NUMBER = 3;
    @SuppressWarnings("serial")
    private java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> blockTokens_;
    /**
     * <pre>
     * each internal block has a block token
     * </pre>
     *
     * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> getBlockTokensList() {
      return blockTokens_;
    }
    /**
     * <pre>
     * each internal block has a block token
     * </pre>
     *
     * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
     */
    @java.lang.Override
    public java.util.List<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> 
        getBlockTokensOrBuilderList() {
      return blockTokens_;
    }
    /**
     * <pre>
     * each internal block has a block token
     * </pre>
     *
     * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
     */
    @java.lang.Override
    public int getBlockTokensCount() {
      return blockTokens_.size();
    }
    /**
     * <pre>
     * each internal block has a block token
     * </pre>
     *
     * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockTokens(int index) {
      return blockTokens_.get(index);
    }
    /**
     * <pre>
     * each internal block has a block token
     * </pre>
     *
     * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokensOrBuilder(
        int index) {
      return blockTokens_.get(index);
    }

    public static final int ECPOLICY_FIELD_NUMBER = 4;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_;
    /**
     * <code>required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;</code>
     * @return Whether the ecPolicy field is set.
     */
    @java.lang.Override
    public boolean hasEcPolicy() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;</code>
     * @return The ecPolicy.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() {
      return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_;
    }
    /**
     * <code>required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() {
      return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_;
    }

    public static final int BLOCKINDICES_FIELD_NUMBER = 5;
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.Internal.IntList blockIndices_ =
        emptyIntList();
    /**
     * <code>repeated uint32 blockIndices = 5;</code>
     * @return A list containing the blockIndices.
     */
    @java.lang.Override
    public java.util.List<java.lang.Integer>
        getBlockIndicesList() {
      return blockIndices_;
    }
    /**
     * <code>repeated uint32 blockIndices = 5;</code>
     * @return The count of blockIndices.
     */
    public int getBlockIndicesCount() {
      return blockIndices_.size();
    }
    /**
     * <code>repeated uint32 blockIndices = 5;</code>
     * @param index The index of the element to return.
     * @return The blockIndices at the given index.
     */
    public int getBlockIndices(int index) {
      return blockIndices_.getInt(index);
    }

    public static final int REQUESTEDNUMBYTES_FIELD_NUMBER = 6;
    private long requestedNumBytes_ = 0L;
    /**
     * <code>required uint64 requestedNumBytes = 6;</code>
     * @return Whether the requestedNumBytes field is set.
     */
    @java.lang.Override
    public boolean hasRequestedNumBytes() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>required uint64 requestedNumBytes = 6;</code>
     * @return The requestedNumBytes.
     */
    @java.lang.Override
    public long getRequestedNumBytes() {
      return requestedNumBytes_;
    }

    public static final int BLOCKCHECKSUMOPTIONS_FIELD_NUMBER = 7;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto blockChecksumOptions_;
    /**
     * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;</code>
     * @return Whether the blockChecksumOptions field is set.
     */
    @java.lang.Override
    public boolean hasBlockChecksumOptions() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;</code>
     * @return The blockChecksumOptions.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions() {
      return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
    }
    /**
     * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder() {
      return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasHeader()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasDatanodes()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasEcPolicy()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasRequestedNumBytes()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getHeader().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getDatanodes().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      for (int i = 0; i < getBlockTokensCount(); i++) {
        if (!getBlockTokens(i).isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (!getEcPolicy().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
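    // Usage sketch (illustrative only; the local variables below are assumptions, not part of
    // this generated file): the checks above mean a message is initialized only when header,
    // datanodes, ecPolicy and requestedNumBytes are all set and every nested message is itself
    // initialized, so a typical construction looks like:
    //
    //   OpBlockGroupChecksumProto proto = OpBlockGroupChecksumProto.newBuilder()
    //       .setHeader(header)              // required .hadoop.hdfs.BaseHeaderProto
    //       .setDatanodes(datanodes)        // required .hadoop.hdfs.DatanodeInfosProto
    //       .setEcPolicy(ecPolicy)          // required .hadoop.hdfs.ErasureCodingPolicyProto
    //       .setRequestedNumBytes(numBytes) // required uint64
    //       .build();                       // build() throws if any required field is unset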

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getHeader());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeMessage(2, getDatanodes());
      }
      for (int i = 0; i < blockTokens_.size(); i++) {
        output.writeMessage(3, blockTokens_.get(i));
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeMessage(4, getEcPolicy());
      }
      for (int i = 0; i < blockIndices_.size(); i++) {
        output.writeUInt32(5, blockIndices_.getInt(i));
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeUInt64(6, requestedNumBytes_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeMessage(7, getBlockChecksumOptions());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getHeader());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(2, getDatanodes());
      }
      for (int i = 0; i < blockTokens_.size(); i++) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(3, blockTokens_.get(i));
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(4, getEcPolicy());
      }
      {
        int dataSize = 0;
        for (int i = 0; i < blockIndices_.size(); i++) {
          dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt32SizeNoTag(blockIndices_.getInt(i));
        }
        size += dataSize;
        size += 1 * getBlockIndicesList().size();
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(6, requestedNumBytes_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(7, getBlockChecksumOptions());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }
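    // Sizing note (illustrative): blockIndices (field 5) is written unpacked, so each element
    // costs one tag byte plus its uint32 varint. For example, indices {0, 1, 2} would add
    // 3 tag bytes + 3 one-byte varints = 6 bytes to the serialized size.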

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto) obj;

      if (hasHeader() != other.hasHeader()) return false;
      if (hasHeader()) {
        if (!getHeader()
            .equals(other.getHeader())) return false;
      }
      if (hasDatanodes() != other.hasDatanodes()) return false;
      if (hasDatanodes()) {
        if (!getDatanodes()
            .equals(other.getDatanodes())) return false;
      }
      if (!getBlockTokensList()
          .equals(other.getBlockTokensList())) return false;
      if (hasEcPolicy() != other.hasEcPolicy()) return false;
      if (hasEcPolicy()) {
        if (!getEcPolicy()
            .equals(other.getEcPolicy())) return false;
      }
      if (!getBlockIndicesList()
          .equals(other.getBlockIndicesList())) return false;
      if (hasRequestedNumBytes() != other.hasRequestedNumBytes()) return false;
      if (hasRequestedNumBytes()) {
        if (getRequestedNumBytes()
            != other.getRequestedNumBytes()) return false;
      }
      if (hasBlockChecksumOptions() != other.hasBlockChecksumOptions()) return false;
      if (hasBlockChecksumOptions()) {
        if (!getBlockChecksumOptions()
            .equals(other.getBlockChecksumOptions())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasHeader()) {
        hash = (37 * hash) + HEADER_FIELD_NUMBER;
        hash = (53 * hash) + getHeader().hashCode();
      }
      if (hasDatanodes()) {
        hash = (37 * hash) + DATANODES_FIELD_NUMBER;
        hash = (53 * hash) + getDatanodes().hashCode();
      }
      if (getBlockTokensCount() > 0) {
        hash = (37 * hash) + BLOCKTOKENS_FIELD_NUMBER;
        hash = (53 * hash) + getBlockTokensList().hashCode();
      }
      if (hasEcPolicy()) {
        hash = (37 * hash) + ECPOLICY_FIELD_NUMBER;
        hash = (53 * hash) + getEcPolicy().hashCode();
      }
      if (getBlockIndicesCount() > 0) {
        hash = (37 * hash) + BLOCKINDICES_FIELD_NUMBER;
        hash = (53 * hash) + getBlockIndicesList().hashCode();
      }
      if (hasRequestedNumBytes()) {
        hash = (37 * hash) + REQUESTEDNUMBYTES_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getRequestedNumBytes());
      }
      if (hasBlockChecksumOptions()) {
        hash = (37 * hash) + BLOCKCHECKSUMOPTIONS_FIELD_NUMBER;
        hash = (53 * hash) + getBlockChecksumOptions().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
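    // Parsing sketch (illustrative; the input variable is an assumption): a serialized message
    // can be rebuilt with any of the parseFrom overloads above, e.g.
    //
    //   OpBlockGroupChecksumProto proto = OpBlockGroupChecksumProto.parseFrom(in);
    //
    // The parseDelimitedFrom variants instead expect the message to be preceded by its varint
    // length, as produced by writeDelimitedTo.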

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.OpBlockGroupChecksumProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpBlockGroupChecksumProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getHeaderFieldBuilder();
          getDatanodesFieldBuilder();
          getBlockTokensFieldBuilder();
          getEcPolicyFieldBuilder();
          getBlockChecksumOptionsFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        header_ = null;
        if (headerBuilder_ != null) {
          headerBuilder_.dispose();
          headerBuilder_ = null;
        }
        datanodes_ = null;
        if (datanodesBuilder_ != null) {
          datanodesBuilder_.dispose();
          datanodesBuilder_ = null;
        }
        if (blockTokensBuilder_ == null) {
          blockTokens_ = java.util.Collections.emptyList();
        } else {
          blockTokens_ = null;
          blockTokensBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000004);
        ecPolicy_ = null;
        if (ecPolicyBuilder_ != null) {
          ecPolicyBuilder_.dispose();
          ecPolicyBuilder_ = null;
        }
        blockIndices_ = emptyIntList();
        requestedNumBytes_ = 0L;
        blockChecksumOptions_ = null;
        if (blockChecksumOptionsBuilder_ != null) {
          blockChecksumOptionsBuilder_.dispose();
          blockChecksumOptionsBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto result) {
        if (blockTokensBuilder_ == null) {
          if (((bitField0_ & 0x00000004) != 0)) {
            blockTokens_ = java.util.Collections.unmodifiableList(blockTokens_);
            bitField0_ = (bitField0_ & ~0x00000004);
          }
          result.blockTokens_ = blockTokens_;
        } else {
          result.blockTokens_ = blockTokensBuilder_.build();
        }
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.header_ = headerBuilder_ == null
              ? header_
              : headerBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.datanodes_ = datanodesBuilder_ == null
              ? datanodes_
              : datanodesBuilder_.build();
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.ecPolicy_ = ecPolicyBuilder_ == null
              ? ecPolicy_
              : ecPolicyBuilder_.build();
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          blockIndices_.makeImmutable();
          result.blockIndices_ = blockIndices_;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.requestedNumBytes_ = requestedNumBytes_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000040) != 0)) {
          result.blockChecksumOptions_ = blockChecksumOptionsBuilder_ == null
              ? blockChecksumOptions_
              : blockChecksumOptionsBuilder_.build();
          to_bitField0_ |= 0x00000010;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto.getDefaultInstance()) return this;
        if (other.hasHeader()) {
          mergeHeader(other.getHeader());
        }
        if (other.hasDatanodes()) {
          mergeDatanodes(other.getDatanodes());
        }
        if (blockTokensBuilder_ == null) {
          if (!other.blockTokens_.isEmpty()) {
            if (blockTokens_.isEmpty()) {
              blockTokens_ = other.blockTokens_;
              bitField0_ = (bitField0_ & ~0x00000004);
            } else {
              ensureBlockTokensIsMutable();
              blockTokens_.addAll(other.blockTokens_);
            }
            onChanged();
          }
        } else {
          if (!other.blockTokens_.isEmpty()) {
            if (blockTokensBuilder_.isEmpty()) {
              blockTokensBuilder_.dispose();
              blockTokensBuilder_ = null;
              blockTokens_ = other.blockTokens_;
              bitField0_ = (bitField0_ & ~0x00000004);
              blockTokensBuilder_ = 
                org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                   getBlockTokensFieldBuilder() : null;
            } else {
              blockTokensBuilder_.addAllMessages(other.blockTokens_);
            }
          }
        }
        if (other.hasEcPolicy()) {
          mergeEcPolicy(other.getEcPolicy());
        }
        if (!other.blockIndices_.isEmpty()) {
          if (blockIndices_.isEmpty()) {
            blockIndices_ = other.blockIndices_;
            blockIndices_.makeImmutable();
            bitField0_ |= 0x00000010;
          } else {
            ensureBlockIndicesIsMutable();
            blockIndices_.addAll(other.blockIndices_);
          }
          onChanged();
        }
        if (other.hasRequestedNumBytes()) {
          setRequestedNumBytes(other.getRequestedNumBytes());
        }
        if (other.hasBlockChecksumOptions()) {
          mergeBlockChecksumOptions(other.getBlockChecksumOptions());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasHeader()) {
          return false;
        }
        if (!hasDatanodes()) {
          return false;
        }
        if (!hasEcPolicy()) {
          return false;
        }
        if (!hasRequestedNumBytes()) {
          return false;
        }
        if (!getHeader().isInitialized()) {
          return false;
        }
        if (!getDatanodes().isInitialized()) {
          return false;
        }
        for (int i = 0; i < getBlockTokensCount(); i++) {
          if (!getBlockTokens(i).isInitialized()) {
            return false;
          }
        }
        if (!getEcPolicy().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getHeaderFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                input.readMessage(
                    getDatanodesFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 26: {
                org.apache.hadoop.security.proto.SecurityProtos.TokenProto m =
                    input.readMessage(
                        org.apache.hadoop.security.proto.SecurityProtos.TokenProto.PARSER,
                        extensionRegistry);
                if (blockTokensBuilder_ == null) {
                  ensureBlockTokensIsMutable();
                  blockTokens_.add(m);
                } else {
                  blockTokensBuilder_.addMessage(m);
                }
                break;
              } // case 26
              case 34: {
                input.readMessage(
                    getEcPolicyFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000008;
                break;
              } // case 34
              case 40: {
                int v = input.readUInt32();
                ensureBlockIndicesIsMutable();
                blockIndices_.addInt(v);
                break;
              } // case 40
              case 42: {
                int length = input.readRawVarint32();
                int limit = input.pushLimit(length);
                ensureBlockIndicesIsMutable();
                while (input.getBytesUntilLimit() > 0) {
                  blockIndices_.addInt(input.readUInt32());
                }
                input.popLimit(limit);
                break;
              } // case 42
              case 48: {
                requestedNumBytes_ = input.readUInt64();
                bitField0_ |= 0x00000020;
                break;
              } // case 48
              case 58: {
                input.readMessage(
                    getBlockChecksumOptionsFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000040;
                break;
              } // case 58
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_;
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       * @return Whether the header field is set.
       */
      public boolean hasHeader() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       * @return The header.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
        if (headerBuilder_ == null) {
          return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
        } else {
          return headerBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
        if (headerBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          header_ = value;
        } else {
          headerBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public Builder setHeader(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
        if (headerBuilder_ == null) {
          header_ = builderForValue.build();
        } else {
          headerBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
        if (headerBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            header_ != null &&
            header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
            getHeaderBuilder().mergeFrom(value);
          } else {
            header_ = value;
          }
        } else {
          headerBuilder_.mergeFrom(value);
        }
        if (header_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public Builder clearHeader() {
        bitField0_ = (bitField0_ & ~0x00000001);
        header_ = null;
        if (headerBuilder_ != null) {
          headerBuilder_.dispose();
          headerBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getHeaderFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
        if (headerBuilder_ != null) {
          return headerBuilder_.getMessageOrBuilder();
        } else {
          return header_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> 
          getHeaderFieldBuilder() {
        if (headerBuilder_ == null) {
          headerBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
                  getHeader(),
                  getParentForChildren(),
                  isClean());
          header_ = null;
        }
        return headerBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto datanodes_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder> datanodesBuilder_;
      /**
       * <code>required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;</code>
       * @return Whether the datanodes field is set.
       */
      public boolean hasDatanodes() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;</code>
       * @return The datanodes.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto getDatanodes() {
        if (datanodesBuilder_ == null) {
          return datanodes_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance() : datanodes_;
        } else {
          return datanodesBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;</code>
       */
      public Builder setDatanodes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto value) {
        if (datanodesBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          datanodes_ = value;
        } else {
          datanodesBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;</code>
       */
      public Builder setDatanodes(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder builderForValue) {
        if (datanodesBuilder_ == null) {
          datanodes_ = builderForValue.build();
        } else {
          datanodesBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;</code>
       */
      public Builder mergeDatanodes(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto value) {
        if (datanodesBuilder_ == null) {
          if (((bitField0_ & 0x00000002) != 0) &&
            datanodes_ != null &&
            datanodes_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance()) {
            getDatanodesBuilder().mergeFrom(value);
          } else {
            datanodes_ = value;
          }
        } else {
          datanodesBuilder_.mergeFrom(value);
        }
        if (datanodes_ != null) {
          bitField0_ |= 0x00000002;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;</code>
       */
      public Builder clearDatanodes() {
        bitField0_ = (bitField0_ & ~0x00000002);
        datanodes_ = null;
        if (datanodesBuilder_ != null) {
          datanodesBuilder_.dispose();
          datanodesBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder getDatanodesBuilder() {
        bitField0_ |= 0x00000002;
        onChanged();
        return getDatanodesFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder getDatanodesOrBuilder() {
        if (datanodesBuilder_ != null) {
          return datanodesBuilder_.getMessageOrBuilder();
        } else {
          return datanodes_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.getDefaultInstance() : datanodes_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.DatanodeInfosProto datanodes = 2;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder> 
          getDatanodesFieldBuilder() {
        if (datanodesBuilder_ == null) {
          datanodesBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProtoOrBuilder>(
                  getDatanodes(),
                  getParentForChildren(),
                  isClean());
          datanodes_ = null;
        }
        return datanodesBuilder_;
      }

      private java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> blockTokens_ =
        java.util.Collections.emptyList();
      private void ensureBlockTokensIsMutable() {
        if (!((bitField0_ & 0x00000004) != 0)) {
          blockTokens_ = new java.util.ArrayList<org.apache.hadoop.security.proto.SecurityProtos.TokenProto>(blockTokens_);
          bitField0_ |= 0x00000004;
        }
      }

      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> blockTokensBuilder_;

      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
       */
      public java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto> getBlockTokensList() {
        if (blockTokensBuilder_ == null) {
          return java.util.Collections.unmodifiableList(blockTokens_);
        } else {
          return blockTokensBuilder_.getMessageList();
        }
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
       */
      public int getBlockTokensCount() {
        if (blockTokensBuilder_ == null) {
          return blockTokens_.size();
        } else {
          return blockTokensBuilder_.getCount();
        }
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
       */
      public org.apache.hadoop.security.proto.SecurityProtos.TokenProto getBlockTokens(int index) {
        if (blockTokensBuilder_ == null) {
          return blockTokens_.get(index);
        } else {
          return blockTokensBuilder_.getMessage(index);
        }
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
       */
      public Builder setBlockTokens(
          int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
        if (blockTokensBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlockTokensIsMutable();
          blockTokens_.set(index, value);
          onChanged();
        } else {
          blockTokensBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
       */
      public Builder setBlockTokens(
          int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
        if (blockTokensBuilder_ == null) {
          ensureBlockTokensIsMutable();
          blockTokens_.set(index, builderForValue.build());
          onChanged();
        } else {
          blockTokensBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
       */
      public Builder addBlockTokens(org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
        if (blockTokensBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlockTokensIsMutable();
          blockTokens_.add(value);
          onChanged();
        } else {
          blockTokensBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
       */
      public Builder addBlockTokens(
          int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto value) {
        if (blockTokensBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureBlockTokensIsMutable();
          blockTokens_.add(index, value);
          onChanged();
        } else {
          blockTokensBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
       */
      public Builder addBlockTokens(
          org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
        if (blockTokensBuilder_ == null) {
          ensureBlockTokensIsMutable();
          blockTokens_.add(builderForValue.build());
          onChanged();
        } else {
          blockTokensBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
       */
      public Builder addBlockTokens(
          int index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder builderForValue) {
        if (blockTokensBuilder_ == null) {
          ensureBlockTokensIsMutable();
          blockTokens_.add(index, builderForValue.build());
          onChanged();
        } else {
          blockTokensBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
       */
      public Builder addAllBlockTokens(
          java.lang.Iterable<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProto> values) {
        if (blockTokensBuilder_ == null) {
          ensureBlockTokensIsMutable();
          org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
              values, blockTokens_);
          onChanged();
        } else {
          blockTokensBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
       */
      public Builder clearBlockTokens() {
        if (blockTokensBuilder_ == null) {
          blockTokens_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000004);
          onChanged();
        } else {
          blockTokensBuilder_.clear();
        }
        return this;
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
       */
      public Builder removeBlockTokens(int index) {
        if (blockTokensBuilder_ == null) {
          ensureBlockTokensIsMutable();
          blockTokens_.remove(index);
          onChanged();
        } else {
          blockTokensBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
       */
      public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder getBlockTokensBuilder(
          int index) {
        return getBlockTokensFieldBuilder().getBuilder(index);
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
       */
      public org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder getBlockTokensOrBuilder(
          int index) {
        if (blockTokensBuilder_ == null) {
          return blockTokens_.get(index);
        } else {
          return blockTokensBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
       */
      public java.util.List<? extends org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> 
           getBlockTokensOrBuilderList() {
        if (blockTokensBuilder_ != null) {
          return blockTokensBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(blockTokens_);
        }
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
       */
      public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder addBlockTokensBuilder() {
        return getBlockTokensFieldBuilder().addBuilder(
            org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance());
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
       */
      public org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder addBlockTokensBuilder(
          int index) {
        return getBlockTokensFieldBuilder().addBuilder(
            index, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.getDefaultInstance());
      }
      /**
       * <pre>
       * each internal block has a block token
       * </pre>
       *
       * <code>repeated .hadoop.common.TokenProto blockTokens = 3;</code>
       */
      public java.util.List<org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder> 
           getBlockTokensBuilderList() {
        return getBlockTokensFieldBuilder().getBuilderList();
      }
      private org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
          org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder> 
          getBlockTokensFieldBuilder() {
        if (blockTokensBuilder_ == null) {
          blockTokensBuilder_ = new org.apache.hadoop.thirdparty.protobuf.RepeatedFieldBuilderV3<
              org.apache.hadoop.security.proto.SecurityProtos.TokenProto, org.apache.hadoop.security.proto.SecurityProtos.TokenProto.Builder, org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder>(
                  blockTokens_,
                  ((bitField0_ & 0x00000004) != 0),
                  getParentForChildren(),
                  isClean());
          blockTokens_ = null;
        }
        return blockTokensBuilder_;
      }
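      // Usage sketch for the repeated blockTokens field (illustrative; the token variables are
      // assumptions): tokens can be appended one at a time or in bulk, and cleared again:
      //
      //   builder.addBlockTokens(tokenForBlock0)
      //          .addBlockTokens(tokenForBlock1);
      //   builder.addAllBlockTokens(moreTokens);  // any Iterable<? extends TokenProto>
      //   builder.clearBlockTokens();             // drops all previously added tokens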

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto ecPolicy_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> ecPolicyBuilder_;
      /**
       * <code>required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;</code>
       * @return Whether the ecPolicy field is set.
       */
      public boolean hasEcPolicy() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;</code>
       * @return The ecPolicy.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto getEcPolicy() {
        if (ecPolicyBuilder_ == null) {
          return ecPolicy_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_;
        } else {
          return ecPolicyBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;</code>
       */
      public Builder setEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
        if (ecPolicyBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ecPolicy_ = value;
        } else {
          ecPolicyBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;</code>
       */
      public Builder setEcPolicy(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder builderForValue) {
        if (ecPolicyBuilder_ == null) {
          ecPolicy_ = builderForValue.build();
        } else {
          ecPolicyBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;</code>
       */
      public Builder mergeEcPolicy(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto value) {
        if (ecPolicyBuilder_ == null) {
          if (((bitField0_ & 0x00000008) != 0) &&
            ecPolicy_ != null &&
            ecPolicy_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance()) {
            getEcPolicyBuilder().mergeFrom(value);
          } else {
            ecPolicy_ = value;
          }
        } else {
          ecPolicyBuilder_.mergeFrom(value);
        }
        if (ecPolicy_ != null) {
          bitField0_ |= 0x00000008;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;</code>
       */
      public Builder clearEcPolicy() {
        bitField0_ = (bitField0_ & ~0x00000008);
        ecPolicy_ = null;
        if (ecPolicyBuilder_ != null) {
          ecPolicyBuilder_.dispose();
          ecPolicyBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder getEcPolicyBuilder() {
        bitField0_ |= 0x00000008;
        onChanged();
        return getEcPolicyFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder getEcPolicyOrBuilder() {
        if (ecPolicyBuilder_ != null) {
          return ecPolicyBuilder_.getMessageOrBuilder();
        } else {
          return ecPolicy_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance() : ecPolicy_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.ErasureCodingPolicyProto ecPolicy = 4;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder> 
          getEcPolicyFieldBuilder() {
        if (ecPolicyBuilder_ == null) {
          ecPolicyBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProtoOrBuilder>(
                  getEcPolicy(),
                  getParentForChildren(),
                  isClean());
          ecPolicy_ = null;
        }
        return ecPolicyBuilder_;
      }

      private org.apache.hadoop.thirdparty.protobuf.Internal.IntList blockIndices_ = emptyIntList();
      private void ensureBlockIndicesIsMutable() {
        if (!blockIndices_.isModifiable()) {
          blockIndices_ = makeMutableCopy(blockIndices_);
        }
        bitField0_ |= 0x00000010;
      }
      /**
       * <code>repeated uint32 blockIndices = 5;</code>
       * @return A list containing the blockIndices.
       */
      public java.util.List<java.lang.Integer>
          getBlockIndicesList() {
        blockIndices_.makeImmutable();
        return blockIndices_;
      }
      /**
       * <code>repeated uint32 blockIndices = 5;</code>
       * @return The count of blockIndices.
       */
      public int getBlockIndicesCount() {
        return blockIndices_.size();
      }
      /**
       * <code>repeated uint32 blockIndices = 5;</code>
       * @param index The index of the element to return.
       * @return The blockIndices at the given index.
       */
      public int getBlockIndices(int index) {
        return blockIndices_.getInt(index);
      }
      /**
       * <code>repeated uint32 blockIndices = 5;</code>
       * @param index The index to set the value at.
       * @param value The blockIndices to set.
       * @return This builder for chaining.
       */
      public Builder setBlockIndices(
          int index, int value) {
        ensureBlockIndicesIsMutable();
        blockIndices_.setInt(index, value);
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint32 blockIndices = 5;</code>
       * @param value The blockIndices to add.
       * @return This builder for chaining.
       */
      public Builder addBlockIndices(int value) {
        ensureBlockIndicesIsMutable();
        blockIndices_.addInt(value);
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint32 blockIndices = 5;</code>
       * @param values The blockIndices to add.
       * @return This builder for chaining.
       */
      public Builder addAllBlockIndices(
          java.lang.Iterable<? extends java.lang.Integer> values) {
        ensureBlockIndicesIsMutable();
        org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
            values, blockIndices_);
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint32 blockIndices = 5;</code>
       * @return This builder for chaining.
       */
      public Builder clearBlockIndices() {
        blockIndices_ = emptyIntList();
        bitField0_ = (bitField0_ & ~0x00000010);
        onChanged();
        return this;
      }
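      // Usage sketch for the repeated blockIndices field (illustrative; the values are
      // assumptions): indices are plain ints and may be added individually or in bulk:
      //
      //   builder.addBlockIndices(0).addBlockIndices(3);
      //   builder.addAllBlockIndices(java.util.Arrays.asList(4, 5));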

      private long requestedNumBytes_ ;
      /**
       * <code>required uint64 requestedNumBytes = 6;</code>
       * @return Whether the requestedNumBytes field is set.
       */
      @java.lang.Override
      public boolean hasRequestedNumBytes() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <code>required uint64 requestedNumBytes = 6;</code>
       * @return The requestedNumBytes.
       */
      @java.lang.Override
      public long getRequestedNumBytes() {
        return requestedNumBytes_;
      }
      /**
       * <code>required uint64 requestedNumBytes = 6;</code>
       * @param value The requestedNumBytes to set.
       * @return This builder for chaining.
       */
      public Builder setRequestedNumBytes(long value) {
        requestedNumBytes_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 requestedNumBytes = 6;</code>
       * @return This builder for chaining.
       */
      public Builder clearRequestedNumBytes() {
        bitField0_ = (bitField0_ & ~0x00000020);
        requestedNumBytes_ = 0L;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto blockChecksumOptions_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder> blockChecksumOptionsBuilder_;
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;</code>
       * @return Whether the blockChecksumOptions field is set.
       */
      public boolean hasBlockChecksumOptions() {
        return ((bitField0_ & 0x00000040) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;</code>
       * @return The blockChecksumOptions.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions() {
        if (blockChecksumOptionsBuilder_ == null) {
          return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
        } else {
          return blockChecksumOptionsBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;</code>
       */
      public Builder setBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
        if (blockChecksumOptionsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          blockChecksumOptions_ = value;
        } else {
          blockChecksumOptionsBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;</code>
       */
      public Builder setBlockChecksumOptions(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder builderForValue) {
        if (blockChecksumOptionsBuilder_ == null) {
          blockChecksumOptions_ = builderForValue.build();
        } else {
          blockChecksumOptionsBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000040;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;</code>
       */
      public Builder mergeBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
        if (blockChecksumOptionsBuilder_ == null) {
          if (((bitField0_ & 0x00000040) != 0) &&
            blockChecksumOptions_ != null &&
            blockChecksumOptions_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance()) {
            getBlockChecksumOptionsBuilder().mergeFrom(value);
          } else {
            blockChecksumOptions_ = value;
          }
        } else {
          blockChecksumOptionsBuilder_.mergeFrom(value);
        }
        if (blockChecksumOptions_ != null) {
          bitField0_ |= 0x00000040;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;</code>
       */
      public Builder clearBlockChecksumOptions() {
        bitField0_ = (bitField0_ & ~0x00000040);
        blockChecksumOptions_ = null;
        if (blockChecksumOptionsBuilder_ != null) {
          blockChecksumOptionsBuilder_.dispose();
          blockChecksumOptionsBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder getBlockChecksumOptionsBuilder() {
        bitField0_ |= 0x00000040;
        onChanged();
        return getBlockChecksumOptionsFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder() {
        if (blockChecksumOptionsBuilder_ != null) {
          return blockChecksumOptionsBuilder_.getMessageOrBuilder();
        } else {
          return blockChecksumOptions_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 7;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder> 
          getBlockChecksumOptionsFieldBuilder() {
        if (blockChecksumOptionsBuilder_ == null) {
          blockChecksumOptionsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder>(
                  getBlockChecksumOptions(),
                  getParentForChildren(),
                  isClean());
          blockChecksumOptions_ = null;
        }
        return blockChecksumOptionsBuilder_;
      }
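      // Note on the pattern above (explanatory comment, not generated output): the
      // optional blockChecksumOptions message is presence-tracked with bit 0x00000040
      // of bitField0_, and its SingleFieldBuilderV3 is created lazily on the first
      // call to getBlockChecksumOptionsFieldBuilder(). Once the nested builder exists,
      // blockChecksumOptions_ is nulled out and all access goes through the builder.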
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpBlockGroupChecksumProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.OpBlockGroupChecksumProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockGroupChecksumProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpBlockGroupChecksumProto>() {
      @java.lang.Override
      public OpBlockGroupChecksumProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockGroupChecksumProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockGroupChecksumProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
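  // Usage sketch (explanatory comment, not generated output): decoding a serialized
  // OpBlockGroupChecksumProto and reading the fields declared above. serializedBytes
  // is a hypothetical caller value, and getBlockIndicesCount() is assumed here as the
  // standard generated count accessor for the repeated blockIndices field.
  //
  //   OpBlockGroupChecksumProto op =
  //       OpBlockGroupChecksumProto.parser().parseFrom(serializedBytes);
  //   long requested = op.getRequestedNumBytes();      // required uint64, field 6
  //   for (int i = 0; i < op.getBlockIndicesCount(); i++) {
  //     int blockIndex = op.getBlockIndices(i);        // repeated uint32, field 5
  //   }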

  public interface ShortCircuitShmIdProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ShortCircuitShmIdProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required int64 hi = 1;</code>
     * @return Whether the hi field is set.
     */
    boolean hasHi();
    /**
     * <code>required int64 hi = 1;</code>
     * @return The hi.
     */
    long getHi();

    /**
     * <code>required int64 lo = 2;</code>
     * @return Whether the lo field is set.
     */
    boolean hasLo();
    /**
     * <code>required int64 lo = 2;</code>
     * @return The lo.
     */
    long getLo();
  }
  /**
   * <pre>
   * An ID uniquely identifying a shared memory segment.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.ShortCircuitShmIdProto}
   */
  public static final class ShortCircuitShmIdProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.ShortCircuitShmIdProto)
      ShortCircuitShmIdProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use ShortCircuitShmIdProto.newBuilder() to construct.
    private ShortCircuitShmIdProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ShortCircuitShmIdProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ShortCircuitShmIdProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmIdProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder.class);
    }

    private int bitField0_;
    public static final int HI_FIELD_NUMBER = 1;
    private long hi_ = 0L;
    /**
     * <code>required int64 hi = 1;</code>
     * @return Whether the hi field is set.
     */
    @java.lang.Override
    public boolean hasHi() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required int64 hi = 1;</code>
     * @return The hi.
     */
    @java.lang.Override
    public long getHi() {
      return hi_;
    }

    public static final int LO_FIELD_NUMBER = 2;
    private long lo_ = 0L;
    /**
     * <code>required int64 lo = 2;</code>
     * @return Whether the lo field is set.
     */
    @java.lang.Override
    public boolean hasLo() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required int64 lo = 2;</code>
     * @return The lo.
     */
    @java.lang.Override
    public long getLo() {
      return lo_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasHi()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasLo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeInt64(1, hi_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeInt64(2, lo_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt64Size(1, hi_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt64Size(2, lo_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto) obj;

      if (hasHi() != other.hasHi()) return false;
      if (hasHi()) {
        if (getHi()
            != other.getHi()) return false;
      }
      if (hasLo() != other.hasLo()) return false;
      if (hasLo()) {
        if (getLo()
            != other.getLo()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasHi()) {
        hash = (37 * hash) + HI_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getHi());
      }
      if (hasLo()) {
        hash = (37 * hash) + LO_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getLo());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     * An ID uniquely identifying a shared memory segment.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.ShortCircuitShmIdProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ShortCircuitShmIdProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmIdProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        hi_ = 0L;
        lo_ = 0L;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.hi_ = hi_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.lo_ = lo_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance()) return this;
        if (other.hasHi()) {
          setHi(other.getHi());
        }
        if (other.hasLo()) {
          setLo(other.getLo());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasHi()) {
          return false;
        }
        if (!hasLo()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                hi_ = input.readInt64();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 16: {
                lo_ = input.readInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
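      // Wire-format note for the switch above (explanatory comment, not generated
      // output): a protobuf tag is (fieldNumber << 3) | wireType, so case 8 is
      // field 1 (hi, varint), case 16 is field 2 (lo, varint), and case 0 means
      // end of input.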
      private int bitField0_;

      private long hi_ ;
      /**
       * <code>required int64 hi = 1;</code>
       * @return Whether the hi field is set.
       */
      @java.lang.Override
      public boolean hasHi() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required int64 hi = 1;</code>
       * @return The hi.
       */
      @java.lang.Override
      public long getHi() {
        return hi_;
      }
      /**
       * <code>required int64 hi = 1;</code>
       * @param value The hi to set.
       * @return This builder for chaining.
       */
      public Builder setHi(long value) {

        hi_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required int64 hi = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearHi() {
        bitField0_ = (bitField0_ & ~0x00000001);
        hi_ = 0L;
        onChanged();
        return this;
      }

      private long lo_ ;
      /**
       * <code>required int64 lo = 2;</code>
       * @return Whether the lo field is set.
       */
      @java.lang.Override
      public boolean hasLo() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required int64 lo = 2;</code>
       * @return The lo.
       */
      @java.lang.Override
      public long getLo() {
        return lo_;
      }
      /**
       * <code>required int64 lo = 2;</code>
       * @param value The lo to set.
       * @return This builder for chaining.
       */
      public Builder setLo(long value) {

        lo_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required int64 lo = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearLo() {
        bitField0_ = (bitField0_ & ~0x00000002);
        lo_ = 0L;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShortCircuitShmIdProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ShortCircuitShmIdProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmIdProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ShortCircuitShmIdProto>() {
      @java.lang.Override
      public ShortCircuitShmIdProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmIdProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmIdProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
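  // Usage sketch (explanatory comment, not generated output): both hi and lo are
  // required, so build() throws if either is unset. segmentIdHigh and segmentIdLow
  // are hypothetical caller-supplied values.
  //
  //   ShortCircuitShmIdProto shmId = ShortCircuitShmIdProto.newBuilder()
  //       .setHi(segmentIdHigh)
  //       .setLo(segmentIdLow)
  //       .build();
  //   byte[] wire = shmId.toByteArray();
  //   ShortCircuitShmIdProto decoded = ShortCircuitShmIdProto.parseFrom(wire);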

  public interface ShortCircuitShmSlotProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ShortCircuitShmSlotProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;</code>
     * @return Whether the shmId field is set.
     */
    boolean hasShmId();
    /**
     * <code>required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;</code>
     * @return The shmId.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getShmId();
    /**
     * <code>required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getShmIdOrBuilder();

    /**
     * <code>required int32 slotIdx = 2;</code>
     * @return Whether the slotIdx field is set.
     */
    boolean hasSlotIdx();
    /**
     * <code>required int32 slotIdx = 2;</code>
     * @return The slotIdx.
     */
    int getSlotIdx();
  }
  /**
   * <pre>
   * An ID uniquely identifying a slot within a shared memory segment.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.ShortCircuitShmSlotProto}
   */
  public static final class ShortCircuitShmSlotProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.ShortCircuitShmSlotProto)
      ShortCircuitShmSlotProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use ShortCircuitShmSlotProto.newBuilder() to construct.
    private ShortCircuitShmSlotProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ShortCircuitShmSlotProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ShortCircuitShmSlotProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder.class);
    }

    private int bitField0_;
    public static final int SHMID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto shmId_;
    /**
     * <code>required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;</code>
     * @return Whether the shmId field is set.
     */
    @java.lang.Override
    public boolean hasShmId() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;</code>
     * @return The shmId.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getShmId() {
      return shmId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : shmId_;
    }
    /**
     * <code>required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getShmIdOrBuilder() {
      return shmId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : shmId_;
    }

    public static final int SLOTIDX_FIELD_NUMBER = 2;
    private int slotIdx_ = 0;
    /**
     * <code>required int32 slotIdx = 2;</code>
     * @return Whether the slotIdx field is set.
     */
    @java.lang.Override
    public boolean hasSlotIdx() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required int32 slotIdx = 2;</code>
     * @return The slotIdx.
     */
    @java.lang.Override
    public int getSlotIdx() {
      return slotIdx_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasShmId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSlotIdx()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getShmId().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
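    // Note (explanatory comment, not generated output): isInitialized() above checks
    // both required fields and then recurses into the nested shmId message, whose own
    // required hi/lo fields must also be present before this message is considered
    // fully initialized.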

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getShmId());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeInt32(2, slotIdx_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getShmId());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeInt32Size(2, slotIdx_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto) obj;

      if (hasShmId() != other.hasShmId()) return false;
      if (hasShmId()) {
        if (!getShmId()
            .equals(other.getShmId())) return false;
      }
      if (hasSlotIdx() != other.hasSlotIdx()) return false;
      if (hasSlotIdx()) {
        if (getSlotIdx()
            != other.getSlotIdx()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasShmId()) {
        hash = (37 * hash) + SHMID_FIELD_NUMBER;
        hash = (53 * hash) + getShmId().hashCode();
      }
      if (hasSlotIdx()) {
        hash = (37 * hash) + SLOTIDX_FIELD_NUMBER;
        hash = (53 * hash) + getSlotIdx();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     * An ID uniquely identifying a slot within a shared memory segment.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.ShortCircuitShmSlotProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ShortCircuitShmSlotProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getShmIdFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        shmId_ = null;
        if (shmIdBuilder_ != null) {
          shmIdBuilder_.dispose();
          shmIdBuilder_ = null;
        }
        slotIdx_ = 0;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.shmId_ = shmIdBuilder_ == null
              ? shmId_
              : shmIdBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.slotIdx_ = slotIdx_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance()) return this;
        if (other.hasShmId()) {
          mergeShmId(other.getShmId());
        }
        if (other.hasSlotIdx()) {
          setSlotIdx(other.getSlotIdx());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasShmId()) {
          return false;
        }
        if (!hasSlotIdx()) {
          return false;
        }
        if (!getShmId().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getShmIdFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                slotIdx_ = input.readInt32();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
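      // Wire-format note for the switch above (explanatory comment, not generated
      // output): case 10 is field 1 (shmId) with wire type 2, a length-delimited
      // nested message read straight into the shmId field builder; case 16 is
      // field 2 (slotIdx) as a varint.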
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto shmId_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder> shmIdBuilder_;
      /**
       * <code>required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;</code>
       * @return Whether the shmId field is set.
       */
      public boolean hasShmId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;</code>
       * @return The shmId.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getShmId() {
        if (shmIdBuilder_ == null) {
          return shmId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : shmId_;
        } else {
          return shmIdBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;</code>
       */
      public Builder setShmId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto value) {
        if (shmIdBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          shmId_ = value;
        } else {
          shmIdBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;</code>
       */
      public Builder setShmId(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder builderForValue) {
        if (shmIdBuilder_ == null) {
          shmId_ = builderForValue.build();
        } else {
          shmIdBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;</code>
       */
      public Builder mergeShmId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto value) {
        if (shmIdBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            shmId_ != null &&
            shmId_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance()) {
            getShmIdBuilder().mergeFrom(value);
          } else {
            shmId_ = value;
          }
        } else {
          shmIdBuilder_.mergeFrom(value);
        }
        if (shmId_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;</code>
       */
      public Builder clearShmId() {
        bitField0_ = (bitField0_ & ~0x00000001);
        shmId_ = null;
        if (shmIdBuilder_ != null) {
          shmIdBuilder_.dispose();
          shmIdBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder getShmIdBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getShmIdFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getShmIdOrBuilder() {
        if (shmIdBuilder_ != null) {
          return shmIdBuilder_.getMessageOrBuilder();
        } else {
          return shmId_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : shmId_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.ShortCircuitShmIdProto shmId = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder> 
          getShmIdFieldBuilder() {
        if (shmIdBuilder_ == null) {
          shmIdBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder>(
                  getShmId(),
                  getParentForChildren(),
                  isClean());
          shmId_ = null;
        }
        return shmIdBuilder_;
      }

      private int slotIdx_ ;
      /**
       * <code>required int32 slotIdx = 2;</code>
       * @return Whether the slotIdx field is set.
       */
      @java.lang.Override
      public boolean hasSlotIdx() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required int32 slotIdx = 2;</code>
       * @return The slotIdx.
       */
      @java.lang.Override
      public int getSlotIdx() {
        return slotIdx_;
      }
      /**
       * <code>required int32 slotIdx = 2;</code>
       * @param value The slotIdx to set.
       * @return This builder for chaining.
       */
      public Builder setSlotIdx(int value) {

        slotIdx_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required int32 slotIdx = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearSlotIdx() {
        bitField0_ = (bitField0_ & ~0x00000002);
        slotIdx_ = 0;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShortCircuitShmSlotProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ShortCircuitShmSlotProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmSlotProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ShortCircuitShmSlotProto>() {
      @java.lang.Override
      public ShortCircuitShmSlotProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmSlotProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmSlotProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
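
  // Illustrative sketch (hand-written; not emitted by protoc): one plausible way to
  // populate a ShortCircuitShmSlotProto using the generated builder above. The shmId
  // value is assumed to be an existing ShortCircuitShmIdProto instance; the slot
  // index 0 is a hypothetical example value.
  //
  //   ShortCircuitShmSlotProto slot = ShortCircuitShmSlotProto.newBuilder()
  //       .setShmId(shmId)   // required ShortCircuitShmIdProto
  //       .setSlotIdx(0)     // required int32 index of the slot within the segment
  //       .build();          // build() throws if a required field is unset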

  public interface OpRequestShortCircuitAccessProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpRequestShortCircuitAccessProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     * @return Whether the header field is set.
     */
    boolean hasHeader();
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     * @return The header.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader();
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder();

    /**
     * <pre>
     ** In order to get short-circuit access to block data, clients must set this
     * to the highest version of the block data that they can understand.
     * Currently 1 is the only version, but more versions may exist in the future
     * if the on-disk format changes.
     * </pre>
     *
     * <code>required uint32 maxVersion = 2;</code>
     * @return Whether the maxVersion field is set.
     */
    boolean hasMaxVersion();
    /**
     * <pre>
     ** In order to get short-circuit access to block data, clients must set this
     * to the highest version of the block data that they can understand.
     * Currently 1 is the only version, but more versions may exist in the future
     * if the on-disk format changes.
     * </pre>
     *
     * <code>required uint32 maxVersion = 2;</code>
     * @return The maxVersion.
     */
    int getMaxVersion();

    /**
     * <pre>
     **
     * The shared memory slot to use, if we are using one.
     * </pre>
     *
     * <code>optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;</code>
     * @return Whether the slotId field is set.
     */
    boolean hasSlotId();
    /**
     * <pre>
     **
     * The shared memory slot to use, if we are using one.
     * </pre>
     *
     * <code>optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;</code>
     * @return The slotId.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getSlotId();
    /**
     * <pre>
     **
     * The shared memory slot to use, if we are using one.
     * </pre>
     *
     * <code>optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder getSlotIdOrBuilder();

    /**
     * <pre>
     **
     * True if the client supports verifying that the file descriptor has been
     * sent successfully.
     * </pre>
     *
     * <code>optional bool supportsReceiptVerification = 4 [default = false];</code>
     * @return Whether the supportsReceiptVerification field is set.
     */
    boolean hasSupportsReceiptVerification();
    /**
     * <pre>
     **
     * True if the client supports verifying that the file descriptor has been
     * sent successfully.
     * </pre>
     *
     * <code>optional bool supportsReceiptVerification = 4 [default = false];</code>
     * @return The supportsReceiptVerification.
     */
    boolean getSupportsReceiptVerification();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.OpRequestShortCircuitAccessProto}
   */
  public static final class OpRequestShortCircuitAccessProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.OpRequestShortCircuitAccessProto)
      OpRequestShortCircuitAccessProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use OpRequestShortCircuitAccessProto.newBuilder() to construct.
    private OpRequestShortCircuitAccessProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private OpRequestShortCircuitAccessProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new OpRequestShortCircuitAccessProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.Builder.class);
    }

    private int bitField0_;
    public static final int HEADER_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     * @return Whether the header field is set.
     */
    @java.lang.Override
    public boolean hasHeader() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     * @return The header.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
      return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
    }
    /**
     * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
      return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
    }

    public static final int MAXVERSION_FIELD_NUMBER = 2;
    private int maxVersion_ = 0;
    /**
     * <pre>
     ** In order to get short-circuit access to block data, clients must set this
     * to the highest version of the block data that they can understand.
     * Currently 1 is the only version, but more versions may exist in the future
     * if the on-disk format changes.
     * </pre>
     *
     * <code>required uint32 maxVersion = 2;</code>
     * @return Whether the maxVersion field is set.
     */
    @java.lang.Override
    public boolean hasMaxVersion() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <pre>
     ** In order to get short-circuit access to block data, clients must set this
     * to the highest version of the block data that they can understand.
     * Currently 1 is the only version, but more versions may exist in the future
     * if the on-disk format changes.
     * </pre>
     *
     * <code>required uint32 maxVersion = 2;</code>
     * @return The maxVersion.
     */
    @java.lang.Override
    public int getMaxVersion() {
      return maxVersion_;
    }

    public static final int SLOTID_FIELD_NUMBER = 3;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto slotId_;
    /**
     * <pre>
     **
     * The shared memory slot to use, if we are using one.
     * </pre>
     *
     * <code>optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;</code>
     * @return Whether the slotId field is set.
     */
    @java.lang.Override
    public boolean hasSlotId() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <pre>
     **
     * The shared memory slot to use, if we are using one.
     * </pre>
     *
     * <code>optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;</code>
     * @return The slotId.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getSlotId() {
      return slotId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance() : slotId_;
    }
    /**
     * <pre>
     **
     * The shared memory slot to use, if we are using one.
     * </pre>
     *
     * <code>optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder getSlotIdOrBuilder() {
      return slotId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance() : slotId_;
    }

    public static final int SUPPORTSRECEIPTVERIFICATION_FIELD_NUMBER = 4;
    private boolean supportsReceiptVerification_ = false;
    /**
     * <pre>
     **
     * True if the client supports verifying that the file descriptor has been
     * sent successfully.
     * </pre>
     *
     * <code>optional bool supportsReceiptVerification = 4 [default = false];</code>
     * @return Whether the supportsReceiptVerification field is set.
     */
    @java.lang.Override
    public boolean hasSupportsReceiptVerification() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <pre>
     **
     * True if the client supports verifying that the file descriptor has been
     * sent successfully.
     * </pre>
     *
     * <code>optional bool supportsReceiptVerification = 4 [default = false];</code>
     * @return The supportsReceiptVerification.
     */
    @java.lang.Override
    public boolean getSupportsReceiptVerification() {
      return supportsReceiptVerification_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasHeader()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasMaxVersion()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getHeader().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (hasSlotId()) {
        if (!getSlotId().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getHeader());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt32(2, maxVersion_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeMessage(3, getSlotId());
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeBool(4, supportsReceiptVerification_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getHeader());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(2, maxVersion_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(3, getSlotId());
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(4, supportsReceiptVerification_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto) obj;

      if (hasHeader() != other.hasHeader()) return false;
      if (hasHeader()) {
        if (!getHeader()
            .equals(other.getHeader())) return false;
      }
      if (hasMaxVersion() != other.hasMaxVersion()) return false;
      if (hasMaxVersion()) {
        if (getMaxVersion()
            != other.getMaxVersion()) return false;
      }
      if (hasSlotId() != other.hasSlotId()) return false;
      if (hasSlotId()) {
        if (!getSlotId()
            .equals(other.getSlotId())) return false;
      }
      if (hasSupportsReceiptVerification() != other.hasSupportsReceiptVerification()) return false;
      if (hasSupportsReceiptVerification()) {
        if (getSupportsReceiptVerification()
            != other.getSupportsReceiptVerification()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasHeader()) {
        hash = (37 * hash) + HEADER_FIELD_NUMBER;
        hash = (53 * hash) + getHeader().hashCode();
      }
      if (hasMaxVersion()) {
        hash = (37 * hash) + MAXVERSION_FIELD_NUMBER;
        hash = (53 * hash) + getMaxVersion();
      }
      if (hasSlotId()) {
        hash = (37 * hash) + SLOTID_FIELD_NUMBER;
        hash = (53 * hash) + getSlotId().hashCode();
      }
      if (hasSupportsReceiptVerification()) {
        hash = (37 * hash) + SUPPORTSRECEIPTVERIFICATION_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getSupportsReceiptVerification());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.OpRequestShortCircuitAccessProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpRequestShortCircuitAccessProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getHeaderFieldBuilder();
          getSlotIdFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        header_ = null;
        if (headerBuilder_ != null) {
          headerBuilder_.dispose();
          headerBuilder_ = null;
        }
        maxVersion_ = 0;
        slotId_ = null;
        if (slotIdBuilder_ != null) {
          slotIdBuilder_.dispose();
          slotIdBuilder_ = null;
        }
        supportsReceiptVerification_ = false;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.header_ = headerBuilder_ == null
              ? header_
              : headerBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.maxVersion_ = maxVersion_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.slotId_ = slotIdBuilder_ == null
              ? slotId_
              : slotIdBuilder_.build();
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.supportsReceiptVerification_ = supportsReceiptVerification_;
          to_bitField0_ |= 0x00000008;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto.getDefaultInstance()) return this;
        if (other.hasHeader()) {
          mergeHeader(other.getHeader());
        }
        if (other.hasMaxVersion()) {
          setMaxVersion(other.getMaxVersion());
        }
        if (other.hasSlotId()) {
          mergeSlotId(other.getSlotId());
        }
        if (other.hasSupportsReceiptVerification()) {
          setSupportsReceiptVerification(other.getSupportsReceiptVerification());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasHeader()) {
          return false;
        }
        if (!hasMaxVersion()) {
          return false;
        }
        if (!getHeader().isInitialized()) {
          return false;
        }
        if (hasSlotId()) {
          if (!getSlotId().isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getHeaderFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                maxVersion_ = input.readUInt32();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 26: {
                input.readMessage(
                    getSlotIdFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              case 32: {
                supportsReceiptVerification_ = input.readBool();
                bitField0_ |= 0x00000008;
                break;
              } // case 32
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto header_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> headerBuilder_;
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       * @return Whether the header field is set.
       */
      public boolean hasHeader() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       * @return The header.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto getHeader() {
        if (headerBuilder_ == null) {
          return header_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
        } else {
          return headerBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public Builder setHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
        if (headerBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          header_ = value;
        } else {
          headerBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public Builder setHeader(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder builderForValue) {
        if (headerBuilder_ == null) {
          header_ = builderForValue.build();
        } else {
          headerBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public Builder mergeHeader(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto value) {
        if (headerBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            header_ != null &&
            header_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance()) {
            getHeaderBuilder().mergeFrom(value);
          } else {
            header_ = value;
          }
        } else {
          headerBuilder_.mergeFrom(value);
        }
        if (header_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public Builder clearHeader() {
        bitField0_ = (bitField0_ & ~0x00000001);
        header_ = null;
        if (headerBuilder_ != null) {
          headerBuilder_.dispose();
          headerBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder getHeaderBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getHeaderFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder getHeaderOrBuilder() {
        if (headerBuilder_ != null) {
          return headerBuilder_.getMessageOrBuilder();
        } else {
          return header_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.getDefaultInstance() : header_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.BaseHeaderProto header = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder> 
          getHeaderFieldBuilder() {
        if (headerBuilder_ == null) {
          headerBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProtoOrBuilder>(
                  getHeader(),
                  getParentForChildren(),
                  isClean());
          header_ = null;
        }
        return headerBuilder_;
      }

      private int maxVersion_ ;
      /**
       * <pre>
       ** In order to get short-circuit access to block data, clients must set this
       * to the highest version of the block data that they can understand.
       * Currently 1 is the only version, but more versions may exist in the future
       * if the on-disk format changes.
       * </pre>
       *
       * <code>required uint32 maxVersion = 2;</code>
       * @return Whether the maxVersion field is set.
       */
      @java.lang.Override
      public boolean hasMaxVersion() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       ** In order to get short-circuit access to block data, clients must set this
       * to the highest version of the block data that they can understand.
       * Currently 1 is the only version, but more versions may exist in the future
       * if the on-disk format changes.
       * </pre>
       *
       * <code>required uint32 maxVersion = 2;</code>
       * @return The maxVersion.
       */
      @java.lang.Override
      public int getMaxVersion() {
        return maxVersion_;
      }
      /**
       * <pre>
       ** In order to get short-circuit access to block data, clients must set this
       * to the highest version of the block data that they can understand.
       * Currently 1 is the only version, but more versions may exist in the future
       * if the on-disk format changes.
       * </pre>
       *
       * <code>required uint32 maxVersion = 2;</code>
       * @param value The maxVersion to set.
       * @return This builder for chaining.
       */
      public Builder setMaxVersion(int value) {

        maxVersion_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <pre>
       ** In order to get short-circuit access to block data, clients must set this
       * to the highest version of the block data that they can understand.
       * Currently 1 is the only version, but more versions may exist in the future
       * if the on-disk format changes.
       * </pre>
       *
       * <code>required uint32 maxVersion = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearMaxVersion() {
        bitField0_ = (bitField0_ & ~0x00000002);
        maxVersion_ = 0;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto slotId_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder> slotIdBuilder_;
      /**
       * <pre>
       **
       * The shared memory slot to use, if we are using one.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;</code>
       * @return Whether the slotId field is set.
       */
      public boolean hasSlotId() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <pre>
       **
       * The shared memory slot to use, if we are using one.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;</code>
       * @return The slotId.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getSlotId() {
        if (slotIdBuilder_ == null) {
          return slotId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance() : slotId_;
        } else {
          return slotIdBuilder_.getMessage();
        }
      }
      /**
       * <pre>
       **
       * The shared memory slot to use, if we are using one.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;</code>
       */
      public Builder setSlotId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto value) {
        if (slotIdBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          slotId_ = value;
        } else {
          slotIdBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <pre>
       **
       * The shared memory slot to use, if we are using one.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;</code>
       */
      public Builder setSlotId(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder builderForValue) {
        if (slotIdBuilder_ == null) {
          slotId_ = builderForValue.build();
        } else {
          slotIdBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <pre>
       **
       * The shared memory slot to use, if we are using one.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;</code>
       */
      public Builder mergeSlotId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto value) {
        if (slotIdBuilder_ == null) {
          if (((bitField0_ & 0x00000004) != 0) &&
            slotId_ != null &&
            slotId_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance()) {
            getSlotIdBuilder().mergeFrom(value);
          } else {
            slotId_ = value;
          }
        } else {
          slotIdBuilder_.mergeFrom(value);
        }
        if (slotId_ != null) {
          bitField0_ |= 0x00000004;
          onChanged();
        }
        return this;
      }
      /**
       * <pre>
       **
       * The shared memory slot to use, if we are using one.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;</code>
       */
      public Builder clearSlotId() {
        bitField0_ = (bitField0_ & ~0x00000004);
        slotId_ = null;
        if (slotIdBuilder_ != null) {
          slotIdBuilder_.dispose();
          slotIdBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <pre>
       **
       * The shared memory slot to use, if we are using one.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder getSlotIdBuilder() {
        bitField0_ |= 0x00000004;
        onChanged();
        return getSlotIdFieldBuilder().getBuilder();
      }
      /**
       * <pre>
       **
       * The shared memory slot to use, if we are using one.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder getSlotIdOrBuilder() {
        if (slotIdBuilder_ != null) {
          return slotIdBuilder_.getMessageOrBuilder();
        } else {
          return slotId_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance() : slotId_;
        }
      }
      /**
       * <pre>
       **
       * The shared memory slot to use, if we are using one.
       * </pre>
       *
       * <code>optional .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 3;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder> 
          getSlotIdFieldBuilder() {
        if (slotIdBuilder_ == null) {
          slotIdBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder>(
                  getSlotId(),
                  getParentForChildren(),
                  isClean());
          slotId_ = null;
        }
        return slotIdBuilder_;
      }

      private boolean supportsReceiptVerification_ ;
      /**
       * <pre>
       **
       * True if the client supports verifying that the file descriptor has been
       * sent successfully.
       * </pre>
       *
       * <code>optional bool supportsReceiptVerification = 4 [default = false];</code>
       * @return Whether the supportsReceiptVerification field is set.
       */
      @java.lang.Override
      public boolean hasSupportsReceiptVerification() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <pre>
       **
       * True if the client supports verifying that the file descriptor has been
       * sent successfully.
       * </pre>
       *
       * <code>optional bool supportsReceiptVerification = 4 [default = false];</code>
       * @return The supportsReceiptVerification.
       */
      @java.lang.Override
      public boolean getSupportsReceiptVerification() {
        return supportsReceiptVerification_;
      }
      /**
       * <pre>
       **
       * True if the client supports verifying that the file descriptor has been
       * sent successfully.
       * </pre>
       *
       * <code>optional bool supportsReceiptVerification = 4 [default = false];</code>
       * @param value The supportsReceiptVerification to set.
       * @return This builder for chaining.
       */
      public Builder setSupportsReceiptVerification(boolean value) {

        supportsReceiptVerification_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <pre>
       **
       * True if the client supports verifying that the file descriptor has been
       * sent successfully.
       * </pre>
       *
       * <code>optional bool supportsReceiptVerification = 4 [default = false];</code>
       * @return This builder for chaining.
       */
      public Builder clearSupportsReceiptVerification() {
        bitField0_ = (bitField0_ & ~0x00000008);
        supportsReceiptVerification_ = false;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpRequestShortCircuitAccessProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.OpRequestShortCircuitAccessProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpRequestShortCircuitAccessProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpRequestShortCircuitAccessProto>() {
      @java.lang.Override
      public OpRequestShortCircuitAccessProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<OpRequestShortCircuitAccessProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<OpRequestShortCircuitAccessProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
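
  // Illustrative sketch (hand-written; not emitted by protoc): building and serializing
  // an OpRequestShortCircuitAccessProto with the generated builder above. The header and
  // slot values are assumed to exist already; maxVersion = 1 and the receipt-verification
  // flag are hypothetical example values.
  //
  //   OpRequestShortCircuitAccessProto request =
  //       OpRequestShortCircuitAccessProto.newBuilder()
  //           .setHeader(header)                      // required BaseHeaderProto
  //           .setMaxVersion(1)                       // required uint32
  //           .setSlotId(slot)                        // optional ShortCircuitShmSlotProto
  //           .setSupportsReceiptVerification(true)   // optional bool [default = false]
  //           .build();
  //   request.writeDelimitedTo(out);                  // out: some java.io.OutputStream
  //
  //   // Reading it back on the other side is symmetric:
  //   OpRequestShortCircuitAccessProto parsed =
  //       OpRequestShortCircuitAccessProto.parseDelimitedFrom(in);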

  public interface ReleaseShortCircuitAccessRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ReleaseShortCircuitAccessRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;</code>
     * @return Whether the slotId field is set.
     */
    boolean hasSlotId();
    /**
     * <code>required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;</code>
     * @return The slotId.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getSlotId();
    /**
     * <code>required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder getSlotIdOrBuilder();

    /**
     * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
     * @return Whether the traceInfo field is set.
     */
    boolean hasTraceInfo();
    /**
     * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
     * @return The traceInfo.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo();
    /**
     * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.ReleaseShortCircuitAccessRequestProto}
   */
  public static final class ReleaseShortCircuitAccessRequestProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.ReleaseShortCircuitAccessRequestProto)
      ReleaseShortCircuitAccessRequestProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use ReleaseShortCircuitAccessRequestProto.newBuilder() to construct.
    private ReleaseShortCircuitAccessRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ReleaseShortCircuitAccessRequestProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ReleaseShortCircuitAccessRequestProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto.Builder.class);
    }

    private int bitField0_;
    public static final int SLOTID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto slotId_;
    /**
     * <code>required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;</code>
     * @return Whether the slotId field is set.
     */
    @java.lang.Override
    public boolean hasSlotId() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;</code>
     * @return The slotId.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getSlotId() {
      return slotId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance() : slotId_;
    }
    /**
     * <code>required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder getSlotIdOrBuilder() {
      return slotId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance() : slotId_;
    }

    public static final int TRACEINFO_FIELD_NUMBER = 2;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto traceInfo_;
    /**
     * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
     * @return Whether the traceInfo field is set.
     */
    @java.lang.Override
    public boolean hasTraceInfo() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
     * @return The traceInfo.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo() {
      return traceInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
    }
    /**
     * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder() {
      return traceInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasSlotId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getSlotId().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getSlotId());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeMessage(2, getTraceInfo());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getSlotId());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(2, getTraceInfo());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto) obj;

      if (hasSlotId() != other.hasSlotId()) return false;
      if (hasSlotId()) {
        if (!getSlotId()
            .equals(other.getSlotId())) return false;
      }
      if (hasTraceInfo() != other.hasTraceInfo()) return false;
      if (hasTraceInfo()) {
        if (!getTraceInfo()
            .equals(other.getTraceInfo())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasSlotId()) {
        hash = (37 * hash) + SLOTID_FIELD_NUMBER;
        hash = (53 * hash) + getSlotId().hashCode();
      }
      if (hasTraceInfo()) {
        hash = (37 * hash) + TRACEINFO_FIELD_NUMBER;
        hash = (53 * hash) + getTraceInfo().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.ReleaseShortCircuitAccessRequestProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ReleaseShortCircuitAccessRequestProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getSlotIdFieldBuilder();
          getTraceInfoFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        slotId_ = null;
        if (slotIdBuilder_ != null) {
          slotIdBuilder_.dispose();
          slotIdBuilder_ = null;
        }
        traceInfo_ = null;
        if (traceInfoBuilder_ != null) {
          traceInfoBuilder_.dispose();
          traceInfoBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.slotId_ = slotIdBuilder_ == null
              ? slotId_
              : slotIdBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.traceInfo_ = traceInfoBuilder_ == null
              ? traceInfo_
              : traceInfoBuilder_.build();
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto.getDefaultInstance()) return this;
        if (other.hasSlotId()) {
          mergeSlotId(other.getSlotId());
        }
        if (other.hasTraceInfo()) {
          mergeTraceInfo(other.getTraceInfo());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasSlotId()) {
          return false;
        }
        if (!getSlotId().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getSlotIdFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                input.readMessage(
                    getTraceInfoFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto slotId_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder> slotIdBuilder_;
      /**
       * <code>required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;</code>
       * @return Whether the slotId field is set.
       */
      public boolean hasSlotId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;</code>
       * @return The slotId.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto getSlotId() {
        if (slotIdBuilder_ == null) {
          return slotId_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance() : slotId_;
        } else {
          return slotIdBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;</code>
       */
      public Builder setSlotId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto value) {
        if (slotIdBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          slotId_ = value;
        } else {
          slotIdBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;</code>
       */
      public Builder setSlotId(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder builderForValue) {
        if (slotIdBuilder_ == null) {
          slotId_ = builderForValue.build();
        } else {
          slotIdBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;</code>
       */
      public Builder mergeSlotId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto value) {
        if (slotIdBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            slotId_ != null &&
            slotId_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance()) {
            getSlotIdBuilder().mergeFrom(value);
          } else {
            slotId_ = value;
          }
        } else {
          slotIdBuilder_.mergeFrom(value);
        }
        if (slotId_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;</code>
       */
      public Builder clearSlotId() {
        bitField0_ = (bitField0_ & ~0x00000001);
        slotId_ = null;
        if (slotIdBuilder_ != null) {
          slotIdBuilder_.dispose();
          slotIdBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder getSlotIdBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getSlotIdFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder getSlotIdOrBuilder() {
        if (slotIdBuilder_ != null) {
          return slotIdBuilder_.getMessageOrBuilder();
        } else {
          return slotId_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.getDefaultInstance() : slotId_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.ShortCircuitShmSlotProto slotId = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder> 
          getSlotIdFieldBuilder() {
        if (slotIdBuilder_ == null) {
          slotIdBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProtoOrBuilder>(
                  getSlotId(),
                  getParentForChildren(),
                  isClean());
          slotId_ = null;
        }
        return slotIdBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto traceInfo_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder> traceInfoBuilder_;
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
       * @return Whether the traceInfo field is set.
       */
      public boolean hasTraceInfo() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
       * @return The traceInfo.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo() {
        if (traceInfoBuilder_ == null) {
          return traceInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
        } else {
          return traceInfoBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
       */
      public Builder setTraceInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto value) {
        if (traceInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          traceInfo_ = value;
        } else {
          traceInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
       */
      public Builder setTraceInfo(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder builderForValue) {
        if (traceInfoBuilder_ == null) {
          traceInfo_ = builderForValue.build();
        } else {
          traceInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
       */
      public Builder mergeTraceInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto value) {
        if (traceInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000002) != 0) &&
            traceInfo_ != null &&
            traceInfo_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance()) {
            getTraceInfoBuilder().mergeFrom(value);
          } else {
            traceInfo_ = value;
          }
        } else {
          traceInfoBuilder_.mergeFrom(value);
        }
        if (traceInfo_ != null) {
          bitField0_ |= 0x00000002;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
       */
      public Builder clearTraceInfo() {
        bitField0_ = (bitField0_ & ~0x00000002);
        traceInfo_ = null;
        if (traceInfoBuilder_ != null) {
          traceInfoBuilder_.dispose();
          traceInfoBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder getTraceInfoBuilder() {
        bitField0_ |= 0x00000002;
        onChanged();
        return getTraceInfoFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder() {
        if (traceInfoBuilder_ != null) {
          return traceInfoBuilder_.getMessageOrBuilder();
        } else {
          return traceInfo_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder> 
          getTraceInfoFieldBuilder() {
        if (traceInfoBuilder_ == null) {
          traceInfoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder>(
                  getTraceInfo(),
                  getParentForChildren(),
                  isClean());
          traceInfo_ = null;
        }
        return traceInfoBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ReleaseShortCircuitAccessRequestProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ReleaseShortCircuitAccessRequestProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ReleaseShortCircuitAccessRequestProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ReleaseShortCircuitAccessRequestProto>() {
      @java.lang.Override
      public ReleaseShortCircuitAccessRequestProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ReleaseShortCircuitAccessRequestProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ReleaseShortCircuitAccessRequestProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
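  /*
   * Editorial usage sketch, not part of the protoc output: building,
   * serializing, and re-parsing the request message above.  "slot" stands in
   * for a ShortCircuitShmSlotProto obtained elsewhere (e.g. from an earlier
   * short-circuit shared-memory exchange) and is an assumption of this sketch.
   *
   *   ShortCircuitShmSlotProto slot = ...;
   *   ReleaseShortCircuitAccessRequestProto req =
   *       ReleaseShortCircuitAccessRequestProto.newBuilder()
   *           .setSlotId(slot)   // required; build() throws if it is missing
   *           .build();
   *   byte[] wire = req.toByteArray();
   *   ReleaseShortCircuitAccessRequestProto parsed =
   *       ReleaseShortCircuitAccessRequestProto.parseFrom(wire);
   */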

  public interface ReleaseShortCircuitAccessResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ReleaseShortCircuitAccessResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.Status status = 1;</code>
     * @return Whether the status field is set.
     */
    boolean hasStatus();
    /**
     * <code>required .hadoop.hdfs.Status status = 1;</code>
     * @return The status.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();

    /**
     * <code>optional string error = 2;</code>
     * @return Whether the error field is set.
     */
    boolean hasError();
    /**
     * <code>optional string error = 2;</code>
     * @return The error.
     */
    java.lang.String getError();
    /**
     * <code>optional string error = 2;</code>
     * @return The bytes for error.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getErrorBytes();
  }
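  /*
   * Editorial usage sketch, not part of the protoc output: a caller reading
   * the response defined below would typically check the required Status and
   * only consult the optional error string when it is present.  "in" is a
   * placeholder for an InputStream positioned at a length-delimited response
   * and is an assumption of this sketch, not something this file provides.
   *
   *   ReleaseShortCircuitAccessResponseProto resp =
   *       ReleaseShortCircuitAccessResponseProto.parseDelimitedFrom(in);
   *   if (resp.getStatus() != Status.SUCCESS) {
   *     String detail = resp.hasError() ? resp.getError() : "(no error text)";
   *     throw new java.io.IOException("release failed: " + detail);
   *   }
   */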
  /**
   * Protobuf type {@code hadoop.hdfs.ReleaseShortCircuitAccessResponseProto}
   */
  public static final class ReleaseShortCircuitAccessResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.ReleaseShortCircuitAccessResponseProto)
      ReleaseShortCircuitAccessResponseProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use ReleaseShortCircuitAccessResponseProto.newBuilder() to construct.
    private ReleaseShortCircuitAccessResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ReleaseShortCircuitAccessResponseProto() {
      status_ = 0;
      error_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ReleaseShortCircuitAccessResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto.Builder.class);
    }

    private int bitField0_;
    public static final int STATUS_FIELD_NUMBER = 1;
    private int status_ = 0;
    /**
     * <code>required .hadoop.hdfs.Status status = 1;</code>
     * @return Whether the status field is set.
     */
    @java.lang.Override public boolean hasStatus() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.Status status = 1;</code>
     * @return The status.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(status_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
    }

    public static final int ERROR_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object error_ = "";
    /**
     * <code>optional string error = 2;</code>
     * @return Whether the error field is set.
     */
    @java.lang.Override
    public boolean hasError() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional string error = 2;</code>
     * @return The error.
     */
    @java.lang.Override
    public java.lang.String getError() {
      java.lang.Object ref = error_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          error_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string error = 2;</code>
     * @return The bytes for error.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getErrorBytes() {
      java.lang.Object ref = error_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        error_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasStatus()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeEnum(1, status_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, error_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(1, status_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, error_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto) obj;

      if (hasStatus() != other.hasStatus()) return false;
      if (hasStatus()) {
        if (status_ != other.status_) return false;
      }
      if (hasError() != other.hasError()) return false;
      if (hasError()) {
        if (!getError()
            .equals(other.getError())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasStatus()) {
        hash = (37 * hash) + STATUS_FIELD_NUMBER;
        hash = (53 * hash) + status_;
      }
      if (hasError()) {
        hash = (37 * hash) + ERROR_FIELD_NUMBER;
        hash = (53 * hash) + getError().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.ReleaseShortCircuitAccessResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ReleaseShortCircuitAccessResponseProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        status_ = 0;
        error_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.status_ = status_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.error_ = error_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto.getDefaultInstance()) return this;
        if (other.hasStatus()) {
          setStatus(other.getStatus());
        }
        if (other.hasError()) {
          error_ = other.error_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasStatus()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(1, tmpRaw);
                } else {
                  status_ = tmpRaw;
                  bitField0_ |= 0x00000001;
                }
                break;
              } // case 8
              case 18: {
                error_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int status_ = 0;
      /**
       * <code>required .hadoop.hdfs.Status status = 1;</code>
       * @return Whether the status field is set.
       */
      @java.lang.Override public boolean hasStatus() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.Status status = 1;</code>
       * @return The status.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(status_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
      }
      /**
       * <code>required .hadoop.hdfs.Status status = 1;</code>
       * @param value The status to set.
       * @return This builder for chaining.
       */
      public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        status_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.Status status = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearStatus() {
        bitField0_ = (bitField0_ & ~0x00000001);
        status_ = 0;
        onChanged();
        return this;
      }

      private java.lang.Object error_ = "";
      /**
       * <code>optional string error = 2;</code>
       * @return Whether the error field is set.
       */
      public boolean hasError() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional string error = 2;</code>
       * @return The error.
       */
      public java.lang.String getError() {
        java.lang.Object ref = error_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            error_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string error = 2;</code>
       * @return The bytes for error.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getErrorBytes() {
        java.lang.Object ref = error_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          error_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string error = 2;</code>
       * @param value The error to set.
       * @return This builder for chaining.
       */
      public Builder setError(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        error_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional string error = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearError() {
        error_ = getDefaultInstance().getError();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <code>optional string error = 2;</code>
       * @param value The bytes for error to set.
       * @return This builder for chaining.
       */
      public Builder setErrorBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        error_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ReleaseShortCircuitAccessResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ReleaseShortCircuitAccessResponseProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ReleaseShortCircuitAccessResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ReleaseShortCircuitAccessResponseProto>() {
      @java.lang.Override
      public ReleaseShortCircuitAccessResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ReleaseShortCircuitAccessResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ReleaseShortCircuitAccessResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
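
  // Editor's note: a hedged usage sketch, not emitted by protoc, showing how the
  // ReleaseShortCircuitAccessResponseProto builder and parser defined above fit together.
  // The variable names are illustrative only.
  //
  //   ReleaseShortCircuitAccessResponseProto resp =
  //       ReleaseShortCircuitAccessResponseProto.newBuilder()
  //           .setStatus(Status.SUCCESS)        // required field; build() fails if it is unset
  //           .build();
  //   byte[] wire = resp.toByteArray();         // serialization inherited from GeneratedMessageV3
  //   ReleaseShortCircuitAccessResponseProto parsed =
  //       ReleaseShortCircuitAccessResponseProto.parseFrom(wire);
  //   assert parsed.getStatus() == Status.SUCCESS;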

  public interface ShortCircuitShmRequestProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ShortCircuitShmRequestProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * The name of the client requesting the shared memory segment.  This is
     * purely for logging / debugging purposes.
     * </pre>
     *
     * <code>required string clientName = 1;</code>
     * @return Whether the clientName field is set.
     */
    boolean hasClientName();
    /**
     * <pre>
     * The name of the client requesting the shared memory segment.  This is
     * purely for logging / debugging purposes.
     * </pre>
     *
     * <code>required string clientName = 1;</code>
     * @return The clientName.
     */
    java.lang.String getClientName();
    /**
     * <pre>
     * The name of the client requesting the shared memory segment.  This is
     * purely for logging / debugging purposes.
     * </pre>
     *
     * <code>required string clientName = 1;</code>
     * @return The bytes for clientName.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getClientNameBytes();

    /**
     * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
     * @return Whether the traceInfo field is set.
     */
    boolean hasTraceInfo();
    /**
     * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
     * @return The traceInfo.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo();
    /**
     * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.ShortCircuitShmRequestProto}
   */
  public static final class ShortCircuitShmRequestProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.ShortCircuitShmRequestProto)
      ShortCircuitShmRequestProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use ShortCircuitShmRequestProto.newBuilder() to construct.
    private ShortCircuitShmRequestProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ShortCircuitShmRequestProto() {
      clientName_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ShortCircuitShmRequestProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto.Builder.class);
    }

    private int bitField0_;
    public static final int CLIENTNAME_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object clientName_ = "";
    /**
     * <pre>
     * The name of the client requesting the shared memory segment.  This is
     * purely for logging / debugging purposes.
     * </pre>
     *
     * <code>required string clientName = 1;</code>
     * @return Whether the clientName field is set.
     */
    @java.lang.Override
    public boolean hasClientName() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <pre>
     * The name of the client requesting the shared memory segment.  This is
     * purely for logging / debugging purposes.
     * </pre>
     *
     * <code>required string clientName = 1;</code>
     * @return The clientName.
     */
    @java.lang.Override
    public java.lang.String getClientName() {
      java.lang.Object ref = clientName_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          clientName_ = s;
        }
        return s;
      }
    }
    /**
     * <pre>
     * The name of the client requesting the shared memory segment.  This is
     * purely for logging / debugging purposes.
     * </pre>
     *
     * <code>required string clientName = 1;</code>
     * @return The bytes for clientName.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getClientNameBytes() {
      java.lang.Object ref = clientName_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        clientName_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int TRACEINFO_FIELD_NUMBER = 2;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto traceInfo_;
    /**
     * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
     * @return Whether the traceInfo field is set.
     */
    @java.lang.Override
    public boolean hasTraceInfo() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
     * @return The traceInfo.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo() {
      return traceInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
    }
    /**
     * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder() {
      return traceInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasClientName()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, clientName_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeMessage(2, getTraceInfo());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, clientName_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(2, getTraceInfo());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto) obj;

      if (hasClientName() != other.hasClientName()) return false;
      if (hasClientName()) {
        if (!getClientName()
            .equals(other.getClientName())) return false;
      }
      if (hasTraceInfo() != other.hasTraceInfo()) return false;
      if (hasTraceInfo()) {
        if (!getTraceInfo()
            .equals(other.getTraceInfo())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasClientName()) {
        hash = (37 * hash) + CLIENTNAME_FIELD_NUMBER;
        hash = (53 * hash) + getClientName().hashCode();
      }
      if (hasTraceInfo()) {
        hash = (37 * hash) + TRACEINFO_FIELD_NUMBER;
        hash = (53 * hash) + getTraceInfo().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.ShortCircuitShmRequestProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ShortCircuitShmRequestProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getTraceInfoFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        clientName_ = "";
        traceInfo_ = null;
        if (traceInfoBuilder_ != null) {
          traceInfoBuilder_.dispose();
          traceInfoBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.clientName_ = clientName_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.traceInfo_ = traceInfoBuilder_ == null
              ? traceInfo_
              : traceInfoBuilder_.build();
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto.getDefaultInstance()) return this;
        if (other.hasClientName()) {
          clientName_ = other.clientName_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        if (other.hasTraceInfo()) {
          mergeTraceInfo(other.getTraceInfo());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasClientName()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                clientName_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 18: {
                input.readMessage(
                    getTraceInfoFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object clientName_ = "";
      /**
       * <pre>
       * The name of the client requesting the shared memory segment.  This is
       * purely for logging / debugging purposes.
       * </pre>
       *
       * <code>required string clientName = 1;</code>
       * @return Whether the clientName field is set.
       */
      public boolean hasClientName() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <pre>
       * The name of the client requesting the shared memory segment.  This is
       * purely for logging / debugging purposes.
       * </pre>
       *
       * <code>required string clientName = 1;</code>
       * @return The clientName.
       */
      public java.lang.String getClientName() {
        java.lang.Object ref = clientName_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            clientName_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <pre>
       * The name of the client requesting the shared memory segment.  This is
       * purely for logging / debugging purposes.
       * </pre>
       *
       * <code>required string clientName = 1;</code>
       * @return The bytes for clientName.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getClientNameBytes() {
        java.lang.Object ref = clientName_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          clientName_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <pre>
       * The name of the client requesting the shared memory segment.  This is
       * purely for logging / debugging purposes.
       * </pre>
       *
       * <code>required string clientName = 1;</code>
       * @param value The clientName to set.
       * @return This builder for chaining.
       */
      public Builder setClientName(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        clientName_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * The name of the client requesting the shared memory segment.  This is
       * purely for logging / debugging purposes.
       * </pre>
       *
       * <code>required string clientName = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearClientName() {
        clientName_ = getDefaultInstance().getClientName();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <pre>
       * The name of the client requesting the shared memory segment.  This is
       * purely for logging / debugging purposes.
       * </pre>
       *
       * <code>required string clientName = 1;</code>
       * @param value The bytes for clientName to set.
       * @return This builder for chaining.
       */
      public Builder setClientNameBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        clientName_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto traceInfo_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder> traceInfoBuilder_;
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
       * @return Whether the traceInfo field is set.
       */
      public boolean hasTraceInfo() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
       * @return The traceInfo.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto getTraceInfo() {
        if (traceInfoBuilder_ == null) {
          return traceInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
        } else {
          return traceInfoBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
       */
      public Builder setTraceInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto value) {
        if (traceInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          traceInfo_ = value;
        } else {
          traceInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
       */
      public Builder setTraceInfo(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder builderForValue) {
        if (traceInfoBuilder_ == null) {
          traceInfo_ = builderForValue.build();
        } else {
          traceInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
       */
      public Builder mergeTraceInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto value) {
        if (traceInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000002) != 0) &&
            traceInfo_ != null &&
            traceInfo_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance()) {
            getTraceInfoBuilder().mergeFrom(value);
          } else {
            traceInfo_ = value;
          }
        } else {
          traceInfoBuilder_.mergeFrom(value);
        }
        if (traceInfo_ != null) {
          bitField0_ |= 0x00000002;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
       */
      public Builder clearTraceInfo() {
        bitField0_ = (bitField0_ & ~0x00000002);
        traceInfo_ = null;
        if (traceInfoBuilder_ != null) {
          traceInfoBuilder_.dispose();
          traceInfoBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder getTraceInfoBuilder() {
        bitField0_ |= 0x00000002;
        onChanged();
        return getTraceInfoFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder getTraceInfoOrBuilder() {
        if (traceInfoBuilder_ != null) {
          return traceInfoBuilder_.getMessageOrBuilder();
        } else {
          return traceInfo_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.getDefaultInstance() : traceInfo_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.DataTransferTraceInfoProto traceInfo = 2;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder> 
          getTraceInfoFieldBuilder() {
        if (traceInfoBuilder_ == null) {
          traceInfoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProtoOrBuilder>(
                  getTraceInfo(),
                  getParentForChildren(),
                  isClean());
          traceInfo_ = null;
        }
        return traceInfoBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShortCircuitShmRequestProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ShortCircuitShmRequestProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmRequestProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ShortCircuitShmRequestProto>() {
      @java.lang.Override
      public ShortCircuitShmRequestProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmRequestProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmRequestProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
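
  // Editor's note: a hedged sketch of a client-side round trip with the message above; the
  // stream variables (shmSockOut / shmSockIn) and the client name are assumptions for
  // illustration, not part of the generated API.
  //
  //   ShortCircuitShmRequestProto req = ShortCircuitShmRequestProto.newBuilder()
  //       .setClientName("DFSClient_NONMAPREDUCE_12345")  // required; logging/debugging only
  //       .build();                                       // traceInfo (optional) left unset
  //   req.writeDelimitedTo(shmSockOut);                   // varint length prefix + message body
  //   ShortCircuitShmRequestProto echoed =
  //       ShortCircuitShmRequestProto.parseDelimitedFrom(shmSockIn);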

  public interface ShortCircuitShmResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ShortCircuitShmResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.Status status = 1;</code>
     * @return Whether the status field is set.
     */
    boolean hasStatus();
    /**
     * <code>required .hadoop.hdfs.Status status = 1;</code>
     * @return The status.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();

    /**
     * <code>optional string error = 2;</code>
     * @return Whether the error field is set.
     */
    boolean hasError();
    /**
     * <code>optional string error = 2;</code>
     * @return The error.
     */
    java.lang.String getError();
    /**
     * <code>optional string error = 2;</code>
     * @return The bytes for error.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getErrorBytes();

    /**
     * <code>optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;</code>
     * @return Whether the id field is set.
     */
    boolean hasId();
    /**
     * <code>optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;</code>
     * @return The id.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getId();
    /**
     * <code>optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getIdOrBuilder();
  }
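
  // Editor's note: a hedged sketch of how a caller might inspect a parsed
  // ShortCircuitShmResponseProto declared by the interface above; "resp" and "LOG"
  // are illustrative assumptions, not generated members.
  //
  //   if (resp.getStatus() == Status.SUCCESS && resp.hasId()) {
  //     ShortCircuitShmIdProto shmId = resp.getId();          // identifies the shared memory segment
  //   } else if (resp.hasError()) {
  //     LOG.warn("shm request failed: " + resp.getError());
  //   }
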
  /**
   * Protobuf type {@code hadoop.hdfs.ShortCircuitShmResponseProto}
   */
  public static final class ShortCircuitShmResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.ShortCircuitShmResponseProto)
      ShortCircuitShmResponseProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use ShortCircuitShmResponseProto.newBuilder() to construct.
    private ShortCircuitShmResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ShortCircuitShmResponseProto() {
      status_ = 0;
      error_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ShortCircuitShmResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.Builder.class);
    }

    private int bitField0_;
    public static final int STATUS_FIELD_NUMBER = 1;
    private int status_ = 0;
    /**
     * <code>required .hadoop.hdfs.Status status = 1;</code>
     * @return Whether the status field is set.
     */
    @java.lang.Override public boolean hasStatus() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.Status status = 1;</code>
     * @return The status.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(status_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
    }

    public static final int ERROR_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object error_ = "";
    /**
     * <code>optional string error = 2;</code>
     * @return Whether the error field is set.
     */
    @java.lang.Override
    public boolean hasError() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional string error = 2;</code>
     * @return The error.
     */
    @java.lang.Override
    public java.lang.String getError() {
      java.lang.Object ref = error_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          error_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string error = 2;</code>
     * @return The bytes for error.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getErrorBytes() {
      java.lang.Object ref = error_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        error_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int ID_FIELD_NUMBER = 3;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto id_;
    /**
     * <code>optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;</code>
     * @return Whether the id field is set.
     */
    @java.lang.Override
    public boolean hasId() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;</code>
     * @return The id.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getId() {
      return id_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : id_;
    }
    /**
     * <code>optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getIdOrBuilder() {
      return id_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : id_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasStatus()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (hasId()) {
        if (!getId().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeEnum(1, status_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, error_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeMessage(3, getId());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(1, status_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, error_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(3, getId());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto) obj;

      if (hasStatus() != other.hasStatus()) return false;
      if (hasStatus()) {
        if (status_ != other.status_) return false;
      }
      if (hasError() != other.hasError()) return false;
      if (hasError()) {
        if (!getError()
            .equals(other.getError())) return false;
      }
      if (hasId() != other.hasId()) return false;
      if (hasId()) {
        if (!getId()
            .equals(other.getId())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasStatus()) {
        hash = (37 * hash) + STATUS_FIELD_NUMBER;
        hash = (53 * hash) + status_;
      }
      if (hasError()) {
        hash = (37 * hash) + ERROR_FIELD_NUMBER;
        hash = (53 * hash) + getError().hashCode();
      }
      if (hasId()) {
        hash = (37 * hash) + ID_FIELD_NUMBER;
        hash = (53 * hash) + getId().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.ShortCircuitShmResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ShortCircuitShmResponseProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getIdFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        status_ = 0;
        error_ = "";
        id_ = null;
        if (idBuilder_ != null) {
          idBuilder_.dispose();
          idBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.status_ = status_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.error_ = error_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.id_ = idBuilder_ == null
              ? id_
              : idBuilder_.build();
          to_bitField0_ |= 0x00000004;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto.getDefaultInstance()) return this;
        if (other.hasStatus()) {
          setStatus(other.getStatus());
        }
        if (other.hasError()) {
          error_ = other.error_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        if (other.hasId()) {
          mergeId(other.getId());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasStatus()) {
          return false;
        }
        if (hasId()) {
          if (!getId().isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(1, tmpRaw);
                } else {
                  status_ = tmpRaw;
                  bitField0_ |= 0x00000001;
                }
                break;
              } // case 8
              case 18: {
                error_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 26: {
                input.readMessage(
                    getIdFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int status_ = 0;
      /**
       * <code>required .hadoop.hdfs.Status status = 1;</code>
       * @return Whether the status field is set.
       */
      @java.lang.Override public boolean hasStatus() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.Status status = 1;</code>
       * @return The status.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(status_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
      }
      /**
       * <code>required .hadoop.hdfs.Status status = 1;</code>
       * @param value The status to set.
       * @return This builder for chaining.
       */
      public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        status_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.Status status = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearStatus() {
        bitField0_ = (bitField0_ & ~0x00000001);
        status_ = 0;
        onChanged();
        return this;
      }

      private java.lang.Object error_ = "";
      /**
       * <code>optional string error = 2;</code>
       * @return Whether the error field is set.
       */
      public boolean hasError() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional string error = 2;</code>
       * @return The error.
       */
      public java.lang.String getError() {
        java.lang.Object ref = error_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            error_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string error = 2;</code>
       * @return The bytes for error.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getErrorBytes() {
        java.lang.Object ref = error_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          error_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string error = 2;</code>
       * @param value The error to set.
       * @return This builder for chaining.
       */
      public Builder setError(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        error_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional string error = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearError() {
        error_ = getDefaultInstance().getError();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <code>optional string error = 2;</code>
       * @param value The bytes for error to set.
       * @return This builder for chaining.
       */
      public Builder setErrorBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        error_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto id_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder> idBuilder_;
      /**
       * <code>optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;</code>
       * @return Whether the id field is set.
       */
      public boolean hasId() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;</code>
       * @return The id.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto getId() {
        if (idBuilder_ == null) {
          return id_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : id_;
        } else {
          return idBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;</code>
       */
      public Builder setId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto value) {
        if (idBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          id_ = value;
        } else {
          idBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;</code>
       */
      public Builder setId(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder builderForValue) {
        if (idBuilder_ == null) {
          id_ = builderForValue.build();
        } else {
          idBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;</code>
       */
      public Builder mergeId(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto value) {
        if (idBuilder_ == null) {
          if (((bitField0_ & 0x00000004) != 0) &&
            id_ != null &&
            id_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance()) {
            getIdBuilder().mergeFrom(value);
          } else {
            id_ = value;
          }
        } else {
          idBuilder_.mergeFrom(value);
        }
        if (id_ != null) {
          bitField0_ |= 0x00000004;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;</code>
       */
      public Builder clearId() {
        bitField0_ = (bitField0_ & ~0x00000004);
        id_ = null;
        if (idBuilder_ != null) {
          idBuilder_.dispose();
          idBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder getIdBuilder() {
        bitField0_ |= 0x00000004;
        onChanged();
        return getIdFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder getIdOrBuilder() {
        if (idBuilder_ != null) {
          return idBuilder_.getMessageOrBuilder();
        } else {
          return id_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.getDefaultInstance() : id_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.ShortCircuitShmIdProto id = 3;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder> 
          getIdFieldBuilder() {
        if (idBuilder_ == null) {
          idBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProtoOrBuilder>(
                  getId(),
                  getParentForChildren(),
                  isClean());
          id_ = null;
        }
        return idBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ShortCircuitShmResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ShortCircuitShmResponseProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ShortCircuitShmResponseProto>() {
      @java.lang.Override
      public ShortCircuitShmResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ShortCircuitShmResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
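
  /**
   * Illustrative usage sketch only (not emitted by protoc): shows how a caller
   * might assemble a ShortCircuitShmResponseProto reply through the generated
   * builder. The example values (an ERROR status with a hypothetical message)
   * are placeholders, not part of the generated API; only status is required,
   * while error and id are optional.
   */
  @SuppressWarnings("unused")
  private static ShortCircuitShmResponseProto exampleShmResponseSketch() {
    return ShortCircuitShmResponseProto.newBuilder()
        .setStatus(Status.ERROR)              // required enum field (tag 1)
        .setError("example failure message")  // optional string field (tag 2)
        .build();                             // would throw if status were unset
  }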

  public interface PacketHeaderProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.PacketHeaderProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <pre>
     * All fields must be fixed-length!
     * </pre>
     *
     * <code>required sfixed64 offsetInBlock = 1;</code>
     * @return Whether the offsetInBlock field is set.
     */
    boolean hasOffsetInBlock();
    /**
     * <pre>
     * All fields must be fixed-length!
     * </pre>
     *
     * <code>required sfixed64 offsetInBlock = 1;</code>
     * @return The offsetInBlock.
     */
    long getOffsetInBlock();

    /**
     * <code>required sfixed64 seqno = 2;</code>
     * @return Whether the seqno field is set.
     */
    boolean hasSeqno();
    /**
     * <code>required sfixed64 seqno = 2;</code>
     * @return The seqno.
     */
    long getSeqno();

    /**
     * <code>required bool lastPacketInBlock = 3;</code>
     * @return Whether the lastPacketInBlock field is set.
     */
    boolean hasLastPacketInBlock();
    /**
     * <code>required bool lastPacketInBlock = 3;</code>
     * @return The lastPacketInBlock.
     */
    boolean getLastPacketInBlock();

    /**
     * <code>required sfixed32 dataLen = 4;</code>
     * @return Whether the dataLen field is set.
     */
    boolean hasDataLen();
    /**
     * <code>required sfixed32 dataLen = 4;</code>
     * @return The dataLen.
     */
    int getDataLen();

    /**
     * <code>optional bool syncBlock = 5 [default = false];</code>
     * @return Whether the syncBlock field is set.
     */
    boolean hasSyncBlock();
    /**
     * <code>optional bool syncBlock = 5 [default = false];</code>
     * @return The syncBlock.
     */
    boolean getSyncBlock();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.PacketHeaderProto}
   */
  public static final class PacketHeaderProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.PacketHeaderProto)
      PacketHeaderProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use PacketHeaderProto.newBuilder() to construct.
    private PacketHeaderProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private PacketHeaderProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new PacketHeaderProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PacketHeaderProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PacketHeaderProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.Builder.class);
    }

    private int bitField0_;
    public static final int OFFSETINBLOCK_FIELD_NUMBER = 1;
    private long offsetInBlock_ = 0L;
    /**
     * <pre>
     * All fields must be fixed-length!
     * </pre>
     *
     * <code>required sfixed64 offsetInBlock = 1;</code>
     * @return Whether the offsetInBlock field is set.
     */
    @java.lang.Override
    public boolean hasOffsetInBlock() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <pre>
     * All fields must be fixed-length!
     * </pre>
     *
     * <code>required sfixed64 offsetInBlock = 1;</code>
     * @return The offsetInBlock.
     */
    @java.lang.Override
    public long getOffsetInBlock() {
      return offsetInBlock_;
    }

    public static final int SEQNO_FIELD_NUMBER = 2;
    private long seqno_ = 0L;
    /**
     * <code>required sfixed64 seqno = 2;</code>
     * @return Whether the seqno field is set.
     */
    @java.lang.Override
    public boolean hasSeqno() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required sfixed64 seqno = 2;</code>
     * @return The seqno.
     */
    @java.lang.Override
    public long getSeqno() {
      return seqno_;
    }

    public static final int LASTPACKETINBLOCK_FIELD_NUMBER = 3;
    private boolean lastPacketInBlock_ = false;
    /**
     * <code>required bool lastPacketInBlock = 3;</code>
     * @return Whether the lastPacketInBlock field is set.
     */
    @java.lang.Override
    public boolean hasLastPacketInBlock() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required bool lastPacketInBlock = 3;</code>
     * @return The lastPacketInBlock.
     */
    @java.lang.Override
    public boolean getLastPacketInBlock() {
      return lastPacketInBlock_;
    }

    public static final int DATALEN_FIELD_NUMBER = 4;
    private int dataLen_ = 0;
    /**
     * <code>required sfixed32 dataLen = 4;</code>
     * @return Whether the dataLen field is set.
     */
    @java.lang.Override
    public boolean hasDataLen() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>required sfixed32 dataLen = 4;</code>
     * @return The dataLen.
     */
    @java.lang.Override
    public int getDataLen() {
      return dataLen_;
    }

    public static final int SYNCBLOCK_FIELD_NUMBER = 5;
    private boolean syncBlock_ = false;
    /**
     * <code>optional bool syncBlock = 5 [default = false];</code>
     * @return Whether the syncBlock field is set.
     */
    @java.lang.Override
    public boolean hasSyncBlock() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>optional bool syncBlock = 5 [default = false];</code>
     * @return The syncBlock.
     */
    @java.lang.Override
    public boolean getSyncBlock() {
      return syncBlock_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasOffsetInBlock()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSeqno()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasLastPacketInBlock()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasDataLen()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeSFixed64(1, offsetInBlock_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeSFixed64(2, seqno_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeBool(3, lastPacketInBlock_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeSFixed32(4, dataLen_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeBool(5, syncBlock_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeSFixed64Size(1, offsetInBlock_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeSFixed64Size(2, seqno_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(3, lastPacketInBlock_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeSFixed32Size(4, dataLen_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBoolSize(5, syncBlock_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto) obj;

      if (hasOffsetInBlock() != other.hasOffsetInBlock()) return false;
      if (hasOffsetInBlock()) {
        if (getOffsetInBlock()
            != other.getOffsetInBlock()) return false;
      }
      if (hasSeqno() != other.hasSeqno()) return false;
      if (hasSeqno()) {
        if (getSeqno()
            != other.getSeqno()) return false;
      }
      if (hasLastPacketInBlock() != other.hasLastPacketInBlock()) return false;
      if (hasLastPacketInBlock()) {
        if (getLastPacketInBlock()
            != other.getLastPacketInBlock()) return false;
      }
      if (hasDataLen() != other.hasDataLen()) return false;
      if (hasDataLen()) {
        if (getDataLen()
            != other.getDataLen()) return false;
      }
      if (hasSyncBlock() != other.hasSyncBlock()) return false;
      if (hasSyncBlock()) {
        if (getSyncBlock()
            != other.getSyncBlock()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasOffsetInBlock()) {
        hash = (37 * hash) + OFFSETINBLOCK_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getOffsetInBlock());
      }
      if (hasSeqno()) {
        hash = (37 * hash) + SEQNO_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getSeqno());
      }
      if (hasLastPacketInBlock()) {
        hash = (37 * hash) + LASTPACKETINBLOCK_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getLastPacketInBlock());
      }
      if (hasDataLen()) {
        hash = (37 * hash) + DATALEN_FIELD_NUMBER;
        hash = (53 * hash) + getDataLen();
      }
      if (hasSyncBlock()) {
        hash = (37 * hash) + SYNCBLOCK_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashBoolean(
            getSyncBlock());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.PacketHeaderProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.PacketHeaderProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PacketHeaderProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PacketHeaderProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        offsetInBlock_ = 0L;
        seqno_ = 0L;
        lastPacketInBlock_ = false;
        dataLen_ = 0;
        syncBlock_ = false;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PacketHeaderProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.offsetInBlock_ = offsetInBlock_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.seqno_ = seqno_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.lastPacketInBlock_ = lastPacketInBlock_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.dataLen_ = dataLen_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.syncBlock_ = syncBlock_;
          to_bitField0_ |= 0x00000010;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto.getDefaultInstance()) return this;
        if (other.hasOffsetInBlock()) {
          setOffsetInBlock(other.getOffsetInBlock());
        }
        if (other.hasSeqno()) {
          setSeqno(other.getSeqno());
        }
        if (other.hasLastPacketInBlock()) {
          setLastPacketInBlock(other.getLastPacketInBlock());
        }
        if (other.hasDataLen()) {
          setDataLen(other.getDataLen());
        }
        if (other.hasSyncBlock()) {
          setSyncBlock(other.getSyncBlock());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasOffsetInBlock()) {
          return false;
        }
        if (!hasSeqno()) {
          return false;
        }
        if (!hasLastPacketInBlock()) {
          return false;
        }
        if (!hasDataLen()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 9: {
                offsetInBlock_ = input.readSFixed64();
                bitField0_ |= 0x00000001;
                break;
              } // case 9
              case 17: {
                seqno_ = input.readSFixed64();
                bitField0_ |= 0x00000002;
                break;
              } // case 17
              case 24: {
                lastPacketInBlock_ = input.readBool();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
              case 37: {
                dataLen_ = input.readSFixed32();
                bitField0_ |= 0x00000008;
                break;
              } // case 37
              case 40: {
                syncBlock_ = input.readBool();
                bitField0_ |= 0x00000010;
                break;
              } // case 40
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private long offsetInBlock_ ;
      /**
       * <pre>
       * All fields must be fixed-length!
       * </pre>
       *
       * <code>required sfixed64 offsetInBlock = 1;</code>
       * @return Whether the offsetInBlock field is set.
       */
      @java.lang.Override
      public boolean hasOffsetInBlock() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <pre>
       * All fields must be fixed-length!
       * </pre>
       *
       * <code>required sfixed64 offsetInBlock = 1;</code>
       * @return The offsetInBlock.
       */
      @java.lang.Override
      public long getOffsetInBlock() {
        return offsetInBlock_;
      }
      /**
       * <pre>
       * All fields must be fixed-length!
       * </pre>
       *
       * <code>required sfixed64 offsetInBlock = 1;</code>
       * @param value The offsetInBlock to set.
       * @return This builder for chaining.
       */
      public Builder setOffsetInBlock(long value) {

        offsetInBlock_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <pre>
       * All fields must be fixed-length!
       * </pre>
       *
       * <code>required sfixed64 offsetInBlock = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearOffsetInBlock() {
        bitField0_ = (bitField0_ & ~0x00000001);
        offsetInBlock_ = 0L;
        onChanged();
        return this;
      }

      private long seqno_ ;
      /**
       * <code>required sfixed64 seqno = 2;</code>
       * @return Whether the seqno field is set.
       */
      @java.lang.Override
      public boolean hasSeqno() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required sfixed64 seqno = 2;</code>
       * @return The seqno.
       */
      @java.lang.Override
      public long getSeqno() {
        return seqno_;
      }
      /**
       * <code>required sfixed64 seqno = 2;</code>
       * @param value The seqno to set.
       * @return This builder for chaining.
       */
      public Builder setSeqno(long value) {

        seqno_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required sfixed64 seqno = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearSeqno() {
        bitField0_ = (bitField0_ & ~0x00000002);
        seqno_ = 0L;
        onChanged();
        return this;
      }

      private boolean lastPacketInBlock_ ;
      /**
       * <code>required bool lastPacketInBlock = 3;</code>
       * @return Whether the lastPacketInBlock field is set.
       */
      @java.lang.Override
      public boolean hasLastPacketInBlock() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required bool lastPacketInBlock = 3;</code>
       * @return The lastPacketInBlock.
       */
      @java.lang.Override
      public boolean getLastPacketInBlock() {
        return lastPacketInBlock_;
      }
      /**
       * <code>required bool lastPacketInBlock = 3;</code>
       * @param value The lastPacketInBlock to set.
       * @return This builder for chaining.
       */
      public Builder setLastPacketInBlock(boolean value) {

        lastPacketInBlock_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required bool lastPacketInBlock = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearLastPacketInBlock() {
        bitField0_ = (bitField0_ & ~0x00000004);
        lastPacketInBlock_ = false;
        onChanged();
        return this;
      }

      private int dataLen_ ;
      /**
       * <code>required sfixed32 dataLen = 4;</code>
       * @return Whether the dataLen field is set.
       */
      @java.lang.Override
      public boolean hasDataLen() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>required sfixed32 dataLen = 4;</code>
       * @return The dataLen.
       */
      @java.lang.Override
      public int getDataLen() {
        return dataLen_;
      }
      /**
       * <code>required sfixed32 dataLen = 4;</code>
       * @param value The dataLen to set.
       * @return This builder for chaining.
       */
      public Builder setDataLen(int value) {

        dataLen_ = value;
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>required sfixed32 dataLen = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearDataLen() {
        bitField0_ = (bitField0_ & ~0x00000008);
        dataLen_ = 0;
        onChanged();
        return this;
      }

      private boolean syncBlock_ ;
      /**
       * <code>optional bool syncBlock = 5 [default = false];</code>
       * @return Whether the syncBlock field is set.
       */
      @java.lang.Override
      public boolean hasSyncBlock() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional bool syncBlock = 5 [default = false];</code>
       * @return The syncBlock.
       */
      @java.lang.Override
      public boolean getSyncBlock() {
        return syncBlock_;
      }
      /**
       * <code>optional bool syncBlock = 5 [default = false];</code>
       * @param value The syncBlock to set.
       * @return This builder for chaining.
       */
      public Builder setSyncBlock(boolean value) {

        syncBlock_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional bool syncBlock = 5 [default = false];</code>
       * @return This builder for chaining.
       */
      public Builder clearSyncBlock() {
        bitField0_ = (bitField0_ & ~0x00000010);
        syncBlock_ = false;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PacketHeaderProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.PacketHeaderProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<PacketHeaderProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<PacketHeaderProto>() {
      @java.lang.Override
      public PacketHeaderProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<PacketHeaderProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<PacketHeaderProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
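
  /**
   * Illustrative usage sketch only (not emitted by protoc): builds a
   * PacketHeaderProto with all four required fields set. The literal values
   * (offset 0, seqno 0, a 512-byte payload) are hypothetical placeholders for
   * a first packet, not values mandated by the protocol.
   */
  @SuppressWarnings("unused")
  private static PacketHeaderProto examplePacketHeaderSketch() {
    return PacketHeaderProto.newBuilder()
        .setOffsetInBlock(0L)         // sfixed64: byte offset of this packet in the block
        .setSeqno(0L)                 // sfixed64: packet sequence number
        .setLastPacketInBlock(false)  // bool: true only for the trailing packet
        .setDataLen(512)              // sfixed32: payload length in bytes
        .setSyncBlock(false)          // optional, defaults to false
        .build();                     // build() enforces the required fields
  }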

  public interface PipelineAckProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.PipelineAckProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required sint64 seqno = 1;</code>
     * @return Whether the seqno field is set.
     */
    boolean hasSeqno();
    /**
     * <code>required sint64 seqno = 1;</code>
     * @return The seqno.
     */
    long getSeqno();

    /**
     * <code>repeated .hadoop.hdfs.Status reply = 2;</code>
     * @return A list containing the reply.
     */
    java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> getReplyList();
    /**
     * <code>repeated .hadoop.hdfs.Status reply = 2;</code>
     * @return The count of reply.
     */
    int getReplyCount();
    /**
     * <code>repeated .hadoop.hdfs.Status reply = 2;</code>
     * @param index The index of the element to return.
     * @return The reply at the given index.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getReply(int index);

    /**
     * <code>optional uint64 downstreamAckTimeNanos = 3 [default = 0];</code>
     * @return Whether the downstreamAckTimeNanos field is set.
     */
    boolean hasDownstreamAckTimeNanos();
    /**
     * <code>optional uint64 downstreamAckTimeNanos = 3 [default = 0];</code>
     * @return The downstreamAckTimeNanos.
     */
    long getDownstreamAckTimeNanos();

    /**
     * <code>repeated uint32 flag = 4 [packed = true];</code>
     * @return A list containing the flag.
     */
    java.util.List<java.lang.Integer> getFlagList();
    /**
     * <code>repeated uint32 flag = 4 [packed = true];</code>
     * @return The count of flag.
     */
    int getFlagCount();
    /**
     * <code>repeated uint32 flag = 4 [packed = true];</code>
     * @param index The index of the element to return.
     * @return The flag at the given index.
     */
    int getFlag(int index);
  }
  /**
   * Protobuf type {@code hadoop.hdfs.PipelineAckProto}
   */
  public static final class PipelineAckProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.PipelineAckProto)
      PipelineAckProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use PipelineAckProto.newBuilder() to construct.
    private PipelineAckProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private PipelineAckProto() {
      reply_ = java.util.Collections.emptyList();
      flag_ = emptyIntList();
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new PipelineAckProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PipelineAckProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PipelineAckProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.Builder.class);
    }

    private int bitField0_;
    public static final int SEQNO_FIELD_NUMBER = 1;
    private long seqno_ = 0L;
    /**
     * <code>required sint64 seqno = 1;</code>
     * @return Whether the seqno field is set.
     */
    @java.lang.Override
    public boolean hasSeqno() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required sint64 seqno = 1;</code>
     * @return The seqno.
     */
    @java.lang.Override
    public long getSeqno() {
      return seqno_;
    }

    public static final int REPLY_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private java.util.List<java.lang.Integer> reply_;
    private static final org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
        java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> reply_converter_ =
            new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter.Converter<
                java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status>() {
              public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status convert(java.lang.Integer from) {
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(from);
                return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
              }
            };
    /**
     * <code>repeated .hadoop.hdfs.Status reply = 2;</code>
     * @return A list containing the reply.
     */
    @java.lang.Override
    public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> getReplyList() {
      return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
          java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status>(reply_, reply_converter_);
    }
    /**
     * <code>repeated .hadoop.hdfs.Status reply = 2;</code>
     * @return The count of reply.
     */
    @java.lang.Override
    public int getReplyCount() {
      return reply_.size();
    }
    /**
     * <code>repeated .hadoop.hdfs.Status reply = 2;</code>
     * @param index The index of the element to return.
     * @return The reply at the given index.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getReply(int index) {
      return reply_converter_.convert(reply_.get(index));
    }

    public static final int DOWNSTREAMACKTIMENANOS_FIELD_NUMBER = 3;
    private long downstreamAckTimeNanos_ = 0L;
    /**
     * <code>optional uint64 downstreamAckTimeNanos = 3 [default = 0];</code>
     * @return Whether the downstreamAckTimeNanos field is set.
     */
    @java.lang.Override
    public boolean hasDownstreamAckTimeNanos() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional uint64 downstreamAckTimeNanos = 3 [default = 0];</code>
     * @return The downstreamAckTimeNanos.
     */
    @java.lang.Override
    public long getDownstreamAckTimeNanos() {
      return downstreamAckTimeNanos_;
    }

    public static final int FLAG_FIELD_NUMBER = 4;
    @SuppressWarnings("serial")
    private org.apache.hadoop.thirdparty.protobuf.Internal.IntList flag_ =
        emptyIntList();
    /**
     * <code>repeated uint32 flag = 4 [packed = true];</code>
     * @return A list containing the flag.
     */
    @java.lang.Override
    public java.util.List<java.lang.Integer>
        getFlagList() {
      return flag_;
    }
    /**
     * <code>repeated uint32 flag = 4 [packed = true];</code>
     * @return The count of flag.
     */
    public int getFlagCount() {
      return flag_.size();
    }
    /**
     * <code>repeated uint32 flag = 4 [packed = true];</code>
     * @param index The index of the element to return.
     * @return The flag at the given index.
     */
    public int getFlag(int index) {
      return flag_.getInt(index);
    }
    private int flagMemoizedSerializedSize = -1;

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasSeqno()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeSInt64(1, seqno_);
      }
      for (int i = 0; i < reply_.size(); i++) {
        output.writeEnum(2, reply_.get(i));
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(3, downstreamAckTimeNanos_);
      }
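      // Packed encoding of field 4 (flag): emit the key (field 4, wire type 2
      // => varint 34), then the payload length cached by the preceding
      // getSerializedSize() call, then the raw uint32 varints with no
      // per-element tags.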
      if (getFlagList().size() > 0) {
        output.writeUInt32NoTag(34);
        output.writeUInt32NoTag(flagMemoizedSerializedSize);
      }
      for (int i = 0; i < flag_.size(); i++) {
        output.writeUInt32NoTag(flag_.getInt(i));
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeSInt64Size(1, seqno_);
      }
      {
        int dataSize = 0;
        for (int i = 0; i < reply_.size(); i++) {
          dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeEnumSizeNoTag(reply_.get(i));
        }
        size += dataSize;
        size += 1 * reply_.size();
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(3, downstreamAckTimeNanos_);
      }
      {
        int dataSize = 0;
        for (int i = 0; i < flag_.size(); i++) {
          dataSize += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
            .computeUInt32SizeNoTag(flag_.getInt(i));
        }
        size += dataSize;
        if (!getFlagList().isEmpty()) {
          size += 1;
          size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
              .computeInt32SizeNoTag(dataSize);
        }
        flagMemoizedSerializedSize = dataSize;
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto) obj;

      if (hasSeqno() != other.hasSeqno()) return false;
      if (hasSeqno()) {
        if (getSeqno()
            != other.getSeqno()) return false;
      }
      if (!reply_.equals(other.reply_)) return false;
      if (hasDownstreamAckTimeNanos() != other.hasDownstreamAckTimeNanos()) return false;
      if (hasDownstreamAckTimeNanos()) {
        if (getDownstreamAckTimeNanos()
            != other.getDownstreamAckTimeNanos()) return false;
      }
      if (!getFlagList()
          .equals(other.getFlagList())) return false;
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasSeqno()) {
        hash = (37 * hash) + SEQNO_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getSeqno());
      }
      if (getReplyCount() > 0) {
        hash = (37 * hash) + REPLY_FIELD_NUMBER;
        hash = (53 * hash) + reply_.hashCode();
      }
      if (hasDownstreamAckTimeNanos()) {
        hash = (37 * hash) + DOWNSTREAMACKTIMENANOS_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getDownstreamAckTimeNanos());
      }
      if (getFlagCount() > 0) {
        hash = (37 * hash) + FLAG_FIELD_NUMBER;
        hash = (53 * hash) + getFlagList().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.PipelineAckProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.PipelineAckProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PipelineAckProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PipelineAckProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        seqno_ = 0L;
        reply_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000002);
        downstreamAckTimeNanos_ = 0L;
        flag_ = emptyIntList();
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_PipelineAckProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto(this);
        buildPartialRepeatedFields(result);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartialRepeatedFields(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto result) {
        if (((bitField0_ & 0x00000002) != 0)) {
          reply_ = java.util.Collections.unmodifiableList(reply_);
          bitField0_ = (bitField0_ & ~0x00000002);
        }
        result.reply_ = reply_;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.seqno_ = seqno_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.downstreamAckTimeNanos_ = downstreamAckTimeNanos_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          flag_.makeImmutable();
          result.flag_ = flag_;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto.getDefaultInstance()) return this;
        if (other.hasSeqno()) {
          setSeqno(other.getSeqno());
        }
        if (!other.reply_.isEmpty()) {
          if (reply_.isEmpty()) {
            reply_ = other.reply_;
            bitField0_ = (bitField0_ & ~0x00000002);
          } else {
            ensureReplyIsMutable();
            reply_.addAll(other.reply_);
          }
          onChanged();
        }
        if (other.hasDownstreamAckTimeNanos()) {
          setDownstreamAckTimeNanos(other.getDownstreamAckTimeNanos());
        }
        if (!other.flag_.isEmpty()) {
          if (flag_.isEmpty()) {
            flag_ = other.flag_;
            flag_.makeImmutable();
            bitField0_ |= 0x00000008;
          } else {
            ensureFlagIsMutable();
            flag_.addAll(other.flag_);
          }
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasSeqno()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                seqno_ = input.readSInt64();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
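              // Field 2 (reply) is a repeated enum. The protobuf wire format
              // permits either unpacked elements (one varint per value, tag 16)
              // or a packed run (length-delimited, tag 18), so both are parsed.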
              case 16: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(2, tmpRaw);
                } else {
                  ensureReplyIsMutable();
                  reply_.add(tmpRaw);
                }
                break;
              } // case 16
              case 18: {
                int length = input.readRawVarint32();
                int oldLimit = input.pushLimit(length);
                while(input.getBytesUntilLimit() > 0) {
                  int tmpRaw = input.readEnum();
                  org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status tmpValue =
                      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(tmpRaw);
                  if (tmpValue == null) {
                    mergeUnknownVarintField(2, tmpRaw);
                  } else {
                    ensureReplyIsMutable();
                    reply_.add(tmpRaw);
                  }
                }
                input.popLimit(oldLimit);
                break;
              } // case 18
              case 24: {
                downstreamAckTimeNanos_ = input.readUInt64();
                bitField0_ |= 0x00000004;
                break;
              } // case 24
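              // Field 4 (flag) is declared [packed = true], yet an unpacked
              // encoding (tag 32) is still accepted alongside the packed form
              // (tag 34), per the wire-format compatibility rules.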
              case 32: {
                int v = input.readUInt32();
                ensureFlagIsMutable();
                flag_.addInt(v);
                break;
              } // case 32
              case 34: {
                int length = input.readRawVarint32();
                int limit = input.pushLimit(length);
                ensureFlagIsMutable();
                while (input.getBytesUntilLimit() > 0) {
                  flag_.addInt(input.readUInt32());
                }
                input.popLimit(limit);
                break;
              } // case 34
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private long seqno_ ;
      /**
       * <code>required sint64 seqno = 1;</code>
       * @return Whether the seqno field is set.
       */
      @java.lang.Override
      public boolean hasSeqno() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required sint64 seqno = 1;</code>
       * @return The seqno.
       */
      @java.lang.Override
      public long getSeqno() {
        return seqno_;
      }
      /**
       * <code>required sint64 seqno = 1;</code>
       * @param value The seqno to set.
       * @return This builder for chaining.
       */
      public Builder setSeqno(long value) {

        seqno_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required sint64 seqno = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearSeqno() {
        bitField0_ = (bitField0_ & ~0x00000001);
        seqno_ = 0L;
        onChanged();
        return this;
      }

      private java.util.List<java.lang.Integer> reply_ =
        java.util.Collections.emptyList();
      private void ensureReplyIsMutable() {
        if (!((bitField0_ & 0x00000002) != 0)) {
          reply_ = new java.util.ArrayList<java.lang.Integer>(reply_);
          bitField0_ |= 0x00000002;
        }
      }
      /**
       * <code>repeated .hadoop.hdfs.Status reply = 2;</code>
       * @return A list containing the reply.
       */
      public java.util.List<org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> getReplyList() {
        return new org.apache.hadoop.thirdparty.protobuf.Internal.ListAdapter<
            java.lang.Integer, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status>(reply_, reply_converter_);
      }
      /**
       * <code>repeated .hadoop.hdfs.Status reply = 2;</code>
       * @return The count of reply.
       */
      public int getReplyCount() {
        return reply_.size();
      }
      /**
       * <code>repeated .hadoop.hdfs.Status reply = 2;</code>
       * @param index The index of the element to return.
       * @return The reply at the given index.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getReply(int index) {
        return reply_converter_.convert(reply_.get(index));
      }
      /**
       * <code>repeated .hadoop.hdfs.Status reply = 2;</code>
       * @param index The index to set the value at.
       * @param value The reply to set.
       * @return This builder for chaining.
       */
      public Builder setReply(
          int index, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureReplyIsMutable();
        reply_.set(index, value.getNumber());
        onChanged();
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.Status reply = 2;</code>
       * @param value The reply to add.
       * @return This builder for chaining.
       */
      public Builder addReply(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
        if (value == null) {
          throw new NullPointerException();
        }
        ensureReplyIsMutable();
        reply_.add(value.getNumber());
        onChanged();
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.Status reply = 2;</code>
       * @param values The reply to add.
       * @return This builder for chaining.
       */
      public Builder addAllReply(
          java.lang.Iterable<? extends org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status> values) {
        ensureReplyIsMutable();
        for (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value : values) {
          reply_.add(value.getNumber());
        }
        onChanged();
        return this;
      }
      /**
       * <code>repeated .hadoop.hdfs.Status reply = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearReply() {
        reply_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }

      private long downstreamAckTimeNanos_ ;
      /**
       * <code>optional uint64 downstreamAckTimeNanos = 3 [default = 0];</code>
       * @return Whether the downstreamAckTimeNanos field is set.
       */
      @java.lang.Override
      public boolean hasDownstreamAckTimeNanos() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional uint64 downstreamAckTimeNanos = 3 [default = 0];</code>
       * @return The downstreamAckTimeNanos.
       */
      @java.lang.Override
      public long getDownstreamAckTimeNanos() {
        return downstreamAckTimeNanos_;
      }
      /**
       * <code>optional uint64 downstreamAckTimeNanos = 3 [default = 0];</code>
       * @param value The downstreamAckTimeNanos to set.
       * @return This builder for chaining.
       */
      public Builder setDownstreamAckTimeNanos(long value) {

        downstreamAckTimeNanos_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional uint64 downstreamAckTimeNanos = 3 [default = 0];</code>
       * @return This builder for chaining.
       */
      public Builder clearDownstreamAckTimeNanos() {
        bitField0_ = (bitField0_ & ~0x00000004);
        downstreamAckTimeNanos_ = 0L;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.Internal.IntList flag_ = emptyIntList();
      private void ensureFlagIsMutable() {
        if (!flag_.isModifiable()) {
          flag_ = makeMutableCopy(flag_);
        }
        bitField0_ |= 0x00000008;
      }
      /**
       * <code>repeated uint32 flag = 4 [packed = true];</code>
       * @return A list containing the flag.
       */
      public java.util.List<java.lang.Integer>
          getFlagList() {
        flag_.makeImmutable();
        return flag_;
      }
      /**
       * <code>repeated uint32 flag = 4 [packed = true];</code>
       * @return The count of flag.
       */
      public int getFlagCount() {
        return flag_.size();
      }
      /**
       * <code>repeated uint32 flag = 4 [packed = true];</code>
       * @param index The index of the element to return.
       * @return The flag at the given index.
       */
      public int getFlag(int index) {
        return flag_.getInt(index);
      }
      /**
       * <code>repeated uint32 flag = 4 [packed = true];</code>
       * @param index The index to set the value at.
       * @param value The flag to set.
       * @return This builder for chaining.
       */
      public Builder setFlag(
          int index, int value) {

        ensureFlagIsMutable();
        flag_.setInt(index, value);
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint32 flag = 4 [packed = true];</code>
       * @param value The flag to add.
       * @return This builder for chaining.
       */
      public Builder addFlag(int value) {

        ensureFlagIsMutable();
        flag_.addInt(value);
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint32 flag = 4 [packed = true];</code>
       * @param values The flag to add.
       * @return This builder for chaining.
       */
      public Builder addAllFlag(
          java.lang.Iterable<? extends java.lang.Integer> values) {
        ensureFlagIsMutable();
        org.apache.hadoop.thirdparty.protobuf.AbstractMessageLite.Builder.addAll(
            values, flag_);
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint32 flag = 4 [packed = true];</code>
       * @return This builder for chaining.
       */
      public Builder clearFlag() {
        flag_ = emptyIntList();
        bitField0_ = (bitField0_ & ~0x00000008);
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PipelineAckProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.PipelineAckProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<PipelineAckProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<PipelineAckProto>() {
      @java.lang.Override
      public PipelineAckProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<PipelineAckProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<PipelineAckProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
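
  // ---------------------------------------------------------------------------
  // Illustrative sketch only (not emitted by protoc; the method name is ours,
  // purely for illustration): one way to build, serialize and re-parse a
  // PipelineAckProto with the generated Builder API above. Field values are
  // arbitrary placeholders.
  // ---------------------------------------------------------------------------
  private static PipelineAckProto examplePipelineAckRoundTrip()
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    PipelineAckProto ack = PipelineAckProto.newBuilder()
        .setSeqno(42L)                          // required field
        .addReply(Status.SUCCESS)               // ack statuses from the pipeline
        .setDownstreamAckTimeNanos(1_000_000L)  // optional, defaults to 0
        .addFlag(0)                             // repeated packed uint32
        .build();                               // fails if required 'seqno' were unset
    byte[] wire = ack.toByteArray();            // standard protobuf serialization
    return PipelineAckProto.parseFrom(wire);    // round-trip back into a message
  }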

  public interface ReadOpChecksumInfoProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ReadOpChecksumInfoProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.ChecksumProto checksum = 1;</code>
     * @return Whether the checksum field is set.
     */
    boolean hasChecksum();
    /**
     * <code>required .hadoop.hdfs.ChecksumProto checksum = 1;</code>
     * @return The checksum.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getChecksum();
    /**
     * <code>required .hadoop.hdfs.ChecksumProto checksum = 1;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getChecksumOrBuilder();

    /**
     * <pre>
     **
     * The offset into the block at which the first packet
     * will start. This is necessary since reads will align
     * backwards to a checksum chunk boundary.
     * </pre>
     *
     * <code>required uint64 chunkOffset = 2;</code>
     * @return Whether the chunkOffset field is set.
     */
    boolean hasChunkOffset();
    /**
     * <pre>
     **
     * The offset into the block at which the first packet
     * will start. This is necessary since reads will align
     * backwards to a checksum chunk boundary.
     * </pre>
     *
     * <code>required uint64 chunkOffset = 2;</code>
     * @return The chunkOffset.
     */
    long getChunkOffset();
  }
  /**
   * <pre>
   **
   * Sent as part of the BlockOpResponseProto
   * for READ_BLOCK and COPY_BLOCK operations.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.ReadOpChecksumInfoProto}
   */
  public static final class ReadOpChecksumInfoProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.ReadOpChecksumInfoProto)
      ReadOpChecksumInfoProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use ReadOpChecksumInfoProto.newBuilder() to construct.
    private ReadOpChecksumInfoProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ReadOpChecksumInfoProto() {
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ReadOpChecksumInfoProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder.class);
    }

    private int bitField0_;
    public static final int CHECKSUM_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto checksum_;
    /**
     * <code>required .hadoop.hdfs.ChecksumProto checksum = 1;</code>
     * @return Whether the checksum field is set.
     */
    @java.lang.Override
    public boolean hasChecksum() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.ChecksumProto checksum = 1;</code>
     * @return The checksum.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getChecksum() {
      return checksum_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance() : checksum_;
    }
    /**
     * <code>required .hadoop.hdfs.ChecksumProto checksum = 1;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getChecksumOrBuilder() {
      return checksum_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance() : checksum_;
    }

    public static final int CHUNKOFFSET_FIELD_NUMBER = 2;
    private long chunkOffset_ = 0L;
    /**
     * <pre>
     **
     * The offset into the block at which the first packet
     * will start. This is necessary since reads will align
     * backwards to a checksum chunk boundary.
     * </pre>
     *
     * <code>required uint64 chunkOffset = 2;</code>
     * @return Whether the chunkOffset field is set.
     */
    @java.lang.Override
    public boolean hasChunkOffset() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <pre>
     **
     * The offset into the block at which the first packet
     * will start. This is necessary since reads will align
     * backwards to a checksum chunk boundary.
     * </pre>
     *
     * <code>required uint64 chunkOffset = 2;</code>
     * @return The chunkOffset.
     */
    @java.lang.Override
    public long getChunkOffset() {
      return chunkOffset_;
    }
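
    // Worked example (illustrative, values assumed): if a client asks to read
    // from byte offset 1,000,000 of a block whose checksum chunks are 512
    // bytes, the datanode starts the stream at the preceding chunk boundary,
    // floor(1_000_000 / 512) * 512 = 999_936, and reports chunkOffset = 999936
    // so the client knows how many leading bytes to skip.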

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasChecksum()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasChunkOffset()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getChecksum().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeMessage(1, getChecksum());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(2, chunkOffset_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(1, getChecksum());
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(2, chunkOffset_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto) obj;

      if (hasChecksum() != other.hasChecksum()) return false;
      if (hasChecksum()) {
        if (!getChecksum()
            .equals(other.getChecksum())) return false;
      }
      if (hasChunkOffset() != other.hasChunkOffset()) return false;
      if (hasChunkOffset()) {
        if (getChunkOffset()
            != other.getChunkOffset()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasChecksum()) {
        hash = (37 * hash) + CHECKSUM_FIELD_NUMBER;
        hash = (53 * hash) + getChecksum().hashCode();
      }
      if (hasChunkOffset()) {
        hash = (37 * hash) + CHUNKOFFSET_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getChunkOffset());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Sent as part of the BlockOpResponseProto
     * for READ_BLOCK and COPY_BLOCK operations.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.ReadOpChecksumInfoProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ReadOpChecksumInfoProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getChecksumFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        checksum_ = null;
        if (checksumBuilder_ != null) {
          checksumBuilder_.dispose();
          checksumBuilder_ = null;
        }
        chunkOffset_ = 0L;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.checksum_ = checksumBuilder_ == null
              ? checksum_
              : checksumBuilder_.build();
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.chunkOffset_ = chunkOffset_;
          to_bitField0_ |= 0x00000002;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance()) return this;
        if (other.hasChecksum()) {
          mergeChecksum(other.getChecksum());
        }
        if (other.hasChunkOffset()) {
          setChunkOffset(other.getChunkOffset());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasChecksum()) {
          return false;
        }
        if (!hasChunkOffset()) {
          return false;
        }
        if (!getChecksum().isInitialized()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                input.readMessage(
                    getChecksumFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              case 16: {
                chunkOffset_ = input.readUInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto checksum_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder> checksumBuilder_;
      /**
       * <code>required .hadoop.hdfs.ChecksumProto checksum = 1;</code>
       * @return Whether the checksum field is set.
       */
      public boolean hasChecksum() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.ChecksumProto checksum = 1;</code>
       * @return The checksum.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto getChecksum() {
        if (checksumBuilder_ == null) {
          return checksum_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance() : checksum_;
        } else {
          return checksumBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.ChecksumProto checksum = 1;</code>
       */
      public Builder setChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) {
        if (checksumBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          checksum_ = value;
        } else {
          checksumBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ChecksumProto checksum = 1;</code>
       */
      public Builder setChecksum(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder builderForValue) {
        if (checksumBuilder_ == null) {
          checksum_ = builderForValue.build();
        } else {
          checksumBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ChecksumProto checksum = 1;</code>
       */
      public Builder mergeChecksum(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto value) {
        if (checksumBuilder_ == null) {
          if (((bitField0_ & 0x00000001) != 0) &&
            checksum_ != null &&
            checksum_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance()) {
            getChecksumBuilder().mergeFrom(value);
          } else {
            checksum_ = value;
          }
        } else {
          checksumBuilder_.mergeFrom(value);
        }
        if (checksum_ != null) {
          bitField0_ |= 0x00000001;
          onChanged();
        }
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ChecksumProto checksum = 1;</code>
       */
      public Builder clearChecksum() {
        bitField0_ = (bitField0_ & ~0x00000001);
        checksum_ = null;
        if (checksumBuilder_ != null) {
          checksumBuilder_.dispose();
          checksumBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.ChecksumProto checksum = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder getChecksumBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getChecksumFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.ChecksumProto checksum = 1;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder getChecksumOrBuilder() {
        if (checksumBuilder_ != null) {
          return checksumBuilder_.getMessageOrBuilder();
        } else {
          return checksum_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.getDefaultInstance() : checksum_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.ChecksumProto checksum = 1;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder> 
          getChecksumFieldBuilder() {
        if (checksumBuilder_ == null) {
          checksumBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProtoOrBuilder>(
                  getChecksum(),
                  getParentForChildren(),
                  isClean());
          checksum_ = null;
        }
        return checksumBuilder_;
      }

      private long chunkOffset_ ;
      /**
       * <pre>
       **
       * The offset into the block at which the first packet
       * will start. This is necessary since reads will align
       * backwards to a checksum chunk boundary.
       * </pre>
       *
       * <code>required uint64 chunkOffset = 2;</code>
       * @return Whether the chunkOffset field is set.
       */
      @java.lang.Override
      public boolean hasChunkOffset() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <pre>
       **
       * The offset into the block at which the first packet
       * will start. This is necessary since reads will align
       * backwards to a checksum chunk boundary.
       * </pre>
       *
       * <code>required uint64 chunkOffset = 2;</code>
       * @return The chunkOffset.
       */
      @java.lang.Override
      public long getChunkOffset() {
        return chunkOffset_;
      }
      /**
       * <pre>
       **
       * The offset into the block at which the first packet
       * will start. This is necessary since reads will align
       * backwards to a checksum chunk boundary.
       * </pre>
       *
       * <code>required uint64 chunkOffset = 2;</code>
       * @param value The chunkOffset to set.
       * @return This builder for chaining.
       */
      public Builder setChunkOffset(long value) {
        chunkOffset_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <pre>
       **
       * The offset into the block at which the first packet
       * will start. This is necessary since reads will align
       * backwards to a checksum chunk boundary.
       * </pre>
       *
       * <code>required uint64 chunkOffset = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearChunkOffset() {
        bitField0_ = (bitField0_ & ~0x00000002);
        chunkOffset_ = 0L;
        onChanged();
        return this;
      }
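      // Illustrative sketch only (not generated code): one way a caller might
      // populate this builder, per the chunkOffset note above. The ChecksumProto
      // setter names (setType, setBytesPerChecksum) and the HdfsProtos
      // ChecksumTypeProto constant are assumed here for illustration.
      //
      //   ReadOpChecksumInfoProto info = ReadOpChecksumInfoProto.newBuilder()
      //       .setChecksum(ChecksumProto.newBuilder()
      //           .setType(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C)
      //           .setBytesPerChecksum(512))
      //       .setChunkOffset(0L) // first packet aligned back to a chunk boundary
      //       .build();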
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ReadOpChecksumInfoProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ReadOpChecksumInfoProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ReadOpChecksumInfoProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ReadOpChecksumInfoProto>() {
      @java.lang.Override
      public ReadOpChecksumInfoProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ReadOpChecksumInfoProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ReadOpChecksumInfoProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

  public interface BlockOpResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.BlockOpResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.Status status = 1;</code>
     * @return Whether the status field is set.
     */
    boolean hasStatus();
    /**
     * <code>required .hadoop.hdfs.Status status = 1;</code>
     * @return The status.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();

    /**
     * <code>optional string firstBadLink = 2;</code>
     * @return Whether the firstBadLink field is set.
     */
    boolean hasFirstBadLink();
    /**
     * <code>optional string firstBadLink = 2;</code>
     * @return The firstBadLink.
     */
    java.lang.String getFirstBadLink();
    /**
     * <code>optional string firstBadLink = 2;</code>
     * @return The bytes for firstBadLink.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getFirstBadLinkBytes();

    /**
     * <code>optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;</code>
     * @return Whether the checksumResponse field is set.
     */
    boolean hasChecksumResponse();
    /**
     * <code>optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;</code>
     * @return The checksumResponse.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getChecksumResponse();
    /**
     * <code>optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder getChecksumResponseOrBuilder();

    /**
     * <code>optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;</code>
     * @return Whether the readOpChecksumInfo field is set.
     */
    boolean hasReadOpChecksumInfo();
    /**
     * <code>optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;</code>
     * @return The readOpChecksumInfo.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getReadOpChecksumInfo();
    /**
     * <code>optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder getReadOpChecksumInfoOrBuilder();

    /**
     * <pre>
     ** explanatory text which may be useful to log on the client side 
     * </pre>
     *
     * <code>optional string message = 5;</code>
     * @return Whether the message field is set.
     */
    boolean hasMessage();
    /**
     * <pre>
     ** explanatory text which may be useful to log on the client side 
     * </pre>
     *
     * <code>optional string message = 5;</code>
     * @return The message.
     */
    java.lang.String getMessage();
    /**
     * <pre>
     ** explanatory text which may be useful to log on the client side 
     * </pre>
     *
     * <code>optional string message = 5;</code>
     * @return The bytes for message.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getMessageBytes();

    /**
     * <pre>
     ** If the server chooses to agree to the request of a client for
     * short-circuit access, it will send a response message with the relevant
     * file descriptors attached.
     *
     * In the body of the message, this version number will be set to the
     * specific version number of the block data that the client is about to
     * read.
     * </pre>
     *
     * <code>optional uint32 shortCircuitAccessVersion = 6;</code>
     * @return Whether the shortCircuitAccessVersion field is set.
     */
    boolean hasShortCircuitAccessVersion();
    /**
     * <pre>
     ** If the server chooses to agree to the request of a client for
     * short-circuit access, it will send a response message with the relevant
     * file descriptors attached.
     *
     * In the body of the message, this version number will be set to the
     * specific version number of the block data that the client is about to
     * read.
     * </pre>
     *
     * <code>optional uint32 shortCircuitAccessVersion = 6;</code>
     * @return The shortCircuitAccessVersion.
     */
    int getShortCircuitAccessVersion();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.BlockOpResponseProto}
   */
  public static final class BlockOpResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.BlockOpResponseProto)
      BlockOpResponseProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use BlockOpResponseProto.newBuilder() to construct.
    private BlockOpResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private BlockOpResponseProto() {
      status_ = 0;
      firstBadLink_ = "";
      message_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new BlockOpResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BlockOpResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder.class);
    }

    private int bitField0_;
    public static final int STATUS_FIELD_NUMBER = 1;
    private int status_ = 0;
    /**
     * <code>required .hadoop.hdfs.Status status = 1;</code>
     * @return Whether the status field is set.
     */
    @java.lang.Override public boolean hasStatus() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.Status status = 1;</code>
     * @return The status.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(status_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
    }

    public static final int FIRSTBADLINK_FIELD_NUMBER = 2;
    @SuppressWarnings("serial")
    private volatile java.lang.Object firstBadLink_ = "";
    /**
     * <code>optional string firstBadLink = 2;</code>
     * @return Whether the firstBadLink field is set.
     */
    @java.lang.Override
    public boolean hasFirstBadLink() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>optional string firstBadLink = 2;</code>
     * @return The firstBadLink.
     */
    @java.lang.Override
    public java.lang.String getFirstBadLink() {
      java.lang.Object ref = firstBadLink_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          firstBadLink_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string firstBadLink = 2;</code>
     * @return The bytes for firstBadLink.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getFirstBadLinkBytes() {
      java.lang.Object ref = firstBadLink_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        firstBadLink_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int CHECKSUMRESPONSE_FIELD_NUMBER = 3;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto checksumResponse_;
    /**
     * <code>optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;</code>
     * @return Whether the checksumResponse field is set.
     */
    @java.lang.Override
    public boolean hasChecksumResponse() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;</code>
     * @return The checksumResponse.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getChecksumResponse() {
      return checksumResponse_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance() : checksumResponse_;
    }
    /**
     * <code>optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder getChecksumResponseOrBuilder() {
      return checksumResponse_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance() : checksumResponse_;
    }

    public static final int READOPCHECKSUMINFO_FIELD_NUMBER = 4;
    private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto readOpChecksumInfo_;
    /**
     * <code>optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;</code>
     * @return Whether the readOpChecksumInfo field is set.
     */
    @java.lang.Override
    public boolean hasReadOpChecksumInfo() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;</code>
     * @return The readOpChecksumInfo.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getReadOpChecksumInfo() {
      return readOpChecksumInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance() : readOpChecksumInfo_;
    }
    /**
     * <code>optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder getReadOpChecksumInfoOrBuilder() {
      return readOpChecksumInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance() : readOpChecksumInfo_;
    }

    public static final int MESSAGE_FIELD_NUMBER = 5;
    @SuppressWarnings("serial")
    private volatile java.lang.Object message_ = "";
    /**
     * <pre>
     ** explanatory text which may be useful to log on the client side 
     * </pre>
     *
     * <code>optional string message = 5;</code>
     * @return Whether the message field is set.
     */
    @java.lang.Override
    public boolean hasMessage() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <pre>
     ** explanatory text which may be useful to log on the client side 
     * </pre>
     *
     * <code>optional string message = 5;</code>
     * @return The message.
     */
    @java.lang.Override
    public java.lang.String getMessage() {
      java.lang.Object ref = message_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          message_ = s;
        }
        return s;
      }
    }
    /**
     * <pre>
     ** explanatory text which may be useful to log on the client side 
     * </pre>
     *
     * <code>optional string message = 5;</code>
     * @return The bytes for message.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getMessageBytes() {
      java.lang.Object ref = message_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        message_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    public static final int SHORTCIRCUITACCESSVERSION_FIELD_NUMBER = 6;
    private int shortCircuitAccessVersion_ = 0;
    /**
     * <pre>
     ** If the server chooses to agree to the request of a client for
     * short-circuit access, it will send a response message with the relevant
     * file descriptors attached.
     *
     * In the body of the message, this version number will be set to the
     * specific version number of the block data that the client is about to
     * read.
     * </pre>
     *
     * <code>optional uint32 shortCircuitAccessVersion = 6;</code>
     * @return Whether the shortCircuitAccessVersion field is set.
     */
    @java.lang.Override
    public boolean hasShortCircuitAccessVersion() {
      return ((bitField0_ & 0x00000020) != 0);
    }
    /**
     * <pre>
     ** If the server chooses to agree to the request of a client for
     * short-circuit access, it will send a response message with the relevant
     * file descriptors attached.
     *
     * In the body of the message, this version number will be set to the
     * specific version number of the block data that the client is about to
     * read.
     * </pre>
     *
     * <code>optional uint32 shortCircuitAccessVersion = 6;</code>
     * @return The shortCircuitAccessVersion.
     */
    @java.lang.Override
    public int getShortCircuitAccessVersion() {
      return shortCircuitAccessVersion_;
    }
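    // Illustrative sketch only (not generated code): how a client might inspect
    // a parsed response; it checks the required status, then the optional
    // message and short-circuit fields described above.
    //
    //   BlockOpResponseProto resp = ...; // obtained via one of the parseFrom overloads
    //   if (resp.getStatus() != Status.SUCCESS) {
    //     throw new java.io.IOException("block op failed: "
    //         + (resp.hasMessage() ? resp.getMessage() : resp.getStatus()));
    //   }
    //   if (resp.hasShortCircuitAccessVersion()) {
    //     int version = resp.getShortCircuitAccessVersion();
    //     // negotiate short-circuit reads using this block-data version
    //   }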

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasStatus()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (hasChecksumResponse()) {
        if (!getChecksumResponse().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      if (hasReadOpChecksumInfo()) {
        if (!getReadOpChecksumInfo().isInitialized()) {
          memoizedIsInitialized = 0;
          return false;
        }
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeEnum(1, status_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 2, firstBadLink_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeMessage(3, getChecksumResponse());
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeMessage(4, getReadOpChecksumInfo());
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 5, message_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        output.writeUInt32(6, shortCircuitAccessVersion_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(1, status_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(2, firstBadLink_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(3, getChecksumResponse());
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(4, getReadOpChecksumInfo());
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(5, message_);
      }
      if (((bitField0_ & 0x00000020) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(6, shortCircuitAccessVersion_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto) obj;

      if (hasStatus() != other.hasStatus()) return false;
      if (hasStatus()) {
        if (status_ != other.status_) return false;
      }
      if (hasFirstBadLink() != other.hasFirstBadLink()) return false;
      if (hasFirstBadLink()) {
        if (!getFirstBadLink()
            .equals(other.getFirstBadLink())) return false;
      }
      if (hasChecksumResponse() != other.hasChecksumResponse()) return false;
      if (hasChecksumResponse()) {
        if (!getChecksumResponse()
            .equals(other.getChecksumResponse())) return false;
      }
      if (hasReadOpChecksumInfo() != other.hasReadOpChecksumInfo()) return false;
      if (hasReadOpChecksumInfo()) {
        if (!getReadOpChecksumInfo()
            .equals(other.getReadOpChecksumInfo())) return false;
      }
      if (hasMessage() != other.hasMessage()) return false;
      if (hasMessage()) {
        if (!getMessage()
            .equals(other.getMessage())) return false;
      }
      if (hasShortCircuitAccessVersion() != other.hasShortCircuitAccessVersion()) return false;
      if (hasShortCircuitAccessVersion()) {
        if (getShortCircuitAccessVersion()
            != other.getShortCircuitAccessVersion()) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasStatus()) {
        hash = (37 * hash) + STATUS_FIELD_NUMBER;
        hash = (53 * hash) + status_;
      }
      if (hasFirstBadLink()) {
        hash = (37 * hash) + FIRSTBADLINK_FIELD_NUMBER;
        hash = (53 * hash) + getFirstBadLink().hashCode();
      }
      if (hasChecksumResponse()) {
        hash = (37 * hash) + CHECKSUMRESPONSE_FIELD_NUMBER;
        hash = (53 * hash) + getChecksumResponse().hashCode();
      }
      if (hasReadOpChecksumInfo()) {
        hash = (37 * hash) + READOPCHECKSUMINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReadOpChecksumInfo().hashCode();
      }
      if (hasMessage()) {
        hash = (37 * hash) + MESSAGE_FIELD_NUMBER;
        hash = (53 * hash) + getMessage().hashCode();
      }
      if (hasShortCircuitAccessVersion()) {
        hash = (37 * hash) + SHORTCIRCUITACCESSVERSION_FIELD_NUMBER;
        hash = (53 * hash) + getShortCircuitAccessVersion();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }
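    // Illustrative sketch only (not generated code): reading a response that a
    // peer wrote with writeDelimitedTo(). This assumes length-delimited framing
    // and a hypothetical `socket`; the actual DataTransferProtocol wire framing
    // may differ.
    //
    //   try (java.io.InputStream in = socket.getInputStream()) {
    //     BlockOpResponseProto resp = BlockOpResponseProto.parseDelimitedFrom(in);
    //     // inspect resp.getStatus(), resp.getMessage(), etc.
    //   }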

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.BlockOpResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.BlockOpResponseProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BlockOpResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getChecksumResponseFieldBuilder();
          getReadOpChecksumInfoFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        status_ = 0;
        firstBadLink_ = "";
        checksumResponse_ = null;
        if (checksumResponseBuilder_ != null) {
          checksumResponseBuilder_.dispose();
          checksumResponseBuilder_ = null;
        }
        readOpChecksumInfo_ = null;
        if (readOpChecksumInfoBuilder_ != null) {
          readOpChecksumInfoBuilder_.dispose();
          readOpChecksumInfoBuilder_ = null;
        }
        message_ = "";
        shortCircuitAccessVersion_ = 0;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.status_ = status_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.firstBadLink_ = firstBadLink_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.checksumResponse_ = checksumResponseBuilder_ == null
              ? checksumResponse_
              : checksumResponseBuilder_.build();
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.readOpChecksumInfo_ = readOpChecksumInfoBuilder_ == null
              ? readOpChecksumInfo_
              : readOpChecksumInfoBuilder_.build();
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.message_ = message_;
          to_bitField0_ |= 0x00000010;
        }
        if (((from_bitField0_ & 0x00000020) != 0)) {
          result.shortCircuitAccessVersion_ = shortCircuitAccessVersion_;
          to_bitField0_ |= 0x00000020;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.getDefaultInstance()) return this;
        if (other.hasStatus()) {
          setStatus(other.getStatus());
        }
        if (other.hasFirstBadLink()) {
          firstBadLink_ = other.firstBadLink_;
          bitField0_ |= 0x00000002;
          onChanged();
        }
        if (other.hasChecksumResponse()) {
          mergeChecksumResponse(other.getChecksumResponse());
        }
        if (other.hasReadOpChecksumInfo()) {
          mergeReadOpChecksumInfo(other.getReadOpChecksumInfo());
        }
        if (other.hasMessage()) {
          message_ = other.message_;
          bitField0_ |= 0x00000010;
          onChanged();
        }
        if (other.hasShortCircuitAccessVersion()) {
          setShortCircuitAccessVersion(other.getShortCircuitAccessVersion());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasStatus()) {
          return false;
        }
        if (hasChecksumResponse()) {
          if (!getChecksumResponse().isInitialized()) {
            return false;
          }
        }
        if (hasReadOpChecksumInfo()) {
          if (!getReadOpChecksumInfo().isInitialized()) {
            return false;
          }
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(1, tmpRaw);
                } else {
                  status_ = tmpRaw;
                  bitField0_ |= 0x00000001;
                }
                break;
              } // case 8
              case 18: {
                firstBadLink_ = input.readBytes();
                bitField0_ |= 0x00000002;
                break;
              } // case 18
              case 26: {
                input.readMessage(
                    getChecksumResponseFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              case 34: {
                input.readMessage(
                    getReadOpChecksumInfoFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000008;
                break;
              } // case 34
              case 42: {
                message_ = input.readBytes();
                bitField0_ |= 0x00000010;
                break;
              } // case 42
              case 48: {
                shortCircuitAccessVersion_ = input.readUInt32();
                bitField0_ |= 0x00000020;
                break;
              } // case 48
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int status_ = 0;
      /**
       * <code>required .hadoop.hdfs.Status status = 1;</code>
       * @return Whether the status field is set.
       */
      @java.lang.Override public boolean hasStatus() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.Status status = 1;</code>
       * @return The status.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(status_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
      }
      /**
       * <code>required .hadoop.hdfs.Status status = 1;</code>
       * @param value The status to set.
       * @return This builder for chaining.
       */
      public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        status_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.Status status = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearStatus() {
        bitField0_ = (bitField0_ & ~0x00000001);
        status_ = 0;
        onChanged();
        return this;
      }

      private java.lang.Object firstBadLink_ = "";
      /**
       * <code>optional string firstBadLink = 2;</code>
       * @return Whether the firstBadLink field is set.
       */
      public boolean hasFirstBadLink() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>optional string firstBadLink = 2;</code>
       * @return The firstBadLink.
       */
      public java.lang.String getFirstBadLink() {
        java.lang.Object ref = firstBadLink_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            firstBadLink_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string firstBadLink = 2;</code>
       * @return The bytes for firstBadLink.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getFirstBadLinkBytes() {
        java.lang.Object ref = firstBadLink_;
        if (ref instanceof java.lang.String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          firstBadLink_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string firstBadLink = 2;</code>
       * @param value The firstBadLink to set.
       * @return This builder for chaining.
       */
      public Builder setFirstBadLink(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        firstBadLink_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>optional string firstBadLink = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearFirstBadLink() {
        firstBadLink_ = getDefaultInstance().getFirstBadLink();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      /**
       * <code>optional string firstBadLink = 2;</code>
       * @param value The bytes for firstBadLink to set.
       * @return This builder for chaining.
       */
      public Builder setFirstBadLinkBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        firstBadLink_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto checksumResponse_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder> checksumResponseBuilder_;
      /**
       * <code>optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;</code>
       * @return Whether the checksumResponse field is set.
       */
      public boolean hasChecksumResponse() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;</code>
       * @return The checksumResponse.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getChecksumResponse() {
        if (checksumResponseBuilder_ == null) {
          return checksumResponse_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance() : checksumResponse_;
        } else {
          return checksumResponseBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;</code>
       */
      public Builder setChecksumResponse(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto value) {
        if (checksumResponseBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          checksumResponse_ = value;
        } else {
          checksumResponseBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;</code>
       */
      public Builder setChecksumResponse(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder builderForValue) {
        if (checksumResponseBuilder_ == null) {
          checksumResponse_ = builderForValue.build();
        } else {
          checksumResponseBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;</code>
       */
      public Builder mergeChecksumResponse(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto value) {
        if (checksumResponseBuilder_ == null) {
          if (((bitField0_ & 0x00000004) != 0) &&
            checksumResponse_ != null &&
            checksumResponse_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance()) {
            getChecksumResponseBuilder().mergeFrom(value);
          } else {
            checksumResponse_ = value;
          }
        } else {
          checksumResponseBuilder_.mergeFrom(value);
        }
        if (checksumResponse_ != null) {
          bitField0_ |= 0x00000004;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;</code>
       */
      public Builder clearChecksumResponse() {
        bitField0_ = (bitField0_ & ~0x00000004);
        checksumResponse_ = null;
        if (checksumResponseBuilder_ != null) {
          checksumResponseBuilder_.dispose();
          checksumResponseBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder getChecksumResponseBuilder() {
        bitField0_ |= 0x00000004;
        onChanged();
        return getChecksumResponseFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder getChecksumResponseOrBuilder() {
        if (checksumResponseBuilder_ != null) {
          return checksumResponseBuilder_.getMessageOrBuilder();
        } else {
          return checksumResponse_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance() : checksumResponse_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.OpBlockChecksumResponseProto checksumResponse = 3;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder> 
          getChecksumResponseFieldBuilder() {
        if (checksumResponseBuilder_ == null) {
          checksumResponseBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder>(
                  getChecksumResponse(),
                  getParentForChildren(),
                  isClean());
          checksumResponse_ = null;
        }
        return checksumResponseBuilder_;
      }

      private org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto readOpChecksumInfo_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder> readOpChecksumInfoBuilder_;
      /**
       * <code>optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;</code>
       * @return Whether the readOpChecksumInfo field is set.
       */
      public boolean hasReadOpChecksumInfo() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;</code>
       * @return The readOpChecksumInfo.
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto getReadOpChecksumInfo() {
        if (readOpChecksumInfoBuilder_ == null) {
          return readOpChecksumInfo_ == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance() : readOpChecksumInfo_;
        } else {
          return readOpChecksumInfoBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;</code>
       */
      public Builder setReadOpChecksumInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto value) {
        if (readOpChecksumInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          readOpChecksumInfo_ = value;
        } else {
          readOpChecksumInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;</code>
       */
      public Builder setReadOpChecksumInfo(
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder builderForValue) {
        if (readOpChecksumInfoBuilder_ == null) {
          readOpChecksumInfo_ = builderForValue.build();
        } else {
          readOpChecksumInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000008;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;</code>
       */
      public Builder mergeReadOpChecksumInfo(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto value) {
        if (readOpChecksumInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000008) != 0) &&
            readOpChecksumInfo_ != null &&
            readOpChecksumInfo_ != org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance()) {
            getReadOpChecksumInfoBuilder().mergeFrom(value);
          } else {
            readOpChecksumInfo_ = value;
          }
        } else {
          readOpChecksumInfoBuilder_.mergeFrom(value);
        }
        if (readOpChecksumInfo_ != null) {
          bitField0_ |= 0x00000008;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;</code>
       */
      public Builder clearReadOpChecksumInfo() {
        bitField0_ = (bitField0_ & ~0x00000008);
        readOpChecksumInfo_ = null;
        if (readOpChecksumInfoBuilder_ != null) {
          readOpChecksumInfoBuilder_.dispose();
          readOpChecksumInfoBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder getReadOpChecksumInfoBuilder() {
        bitField0_ |= 0x00000008;
        onChanged();
        return getReadOpChecksumInfoFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder getReadOpChecksumInfoOrBuilder() {
        if (readOpChecksumInfoBuilder_ != null) {
          return readOpChecksumInfoBuilder_.getMessageOrBuilder();
        } else {
          return readOpChecksumInfo_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.getDefaultInstance() : readOpChecksumInfo_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.ReadOpChecksumInfoProto readOpChecksumInfo = 4;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder> 
          getReadOpChecksumInfoFieldBuilder() {
        if (readOpChecksumInfoBuilder_ == null) {
          readOpChecksumInfoBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProtoOrBuilder>(
                  getReadOpChecksumInfo(),
                  getParentForChildren(),
                  isClean());
          readOpChecksumInfo_ = null;
        }
        return readOpChecksumInfoBuilder_;
      }

      private java.lang.Object message_ = "";
      /**
       * <pre>
       ** explanatory text which may be useful to log on the client side 
       * </pre>
       *
       * <code>optional string message = 5;</code>
       * @return Whether the message field is set.
       */
      public boolean hasMessage() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <pre>
       ** explanatory text which may be useful to log on the client side 
       * </pre>
       *
       * <code>optional string message = 5;</code>
       * @return The message.
       */
      public java.lang.String getMessage() {
        java.lang.Object ref = message_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            message_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <pre>
       ** explanatory text which may be useful to log on the client side 
       * </pre>
       *
       * <code>optional string message = 5;</code>
       * @return The bytes for message.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getMessageBytes() {
        java.lang.Object ref = message_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          message_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <pre>
       ** explanatory text which may be useful to log on the client side 
       * </pre>
       *
       * <code>optional string message = 5;</code>
       * @param value The message to set.
       * @return This builder for chaining.
       */
      public Builder setMessage(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        message_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <pre>
       ** explanatory text which may be useful to log on the client side 
       * </pre>
       *
       * <code>optional string message = 5;</code>
       * @return This builder for chaining.
       */
      public Builder clearMessage() {
        message_ = getDefaultInstance().getMessage();
        bitField0_ = (bitField0_ & ~0x00000010);
        onChanged();
        return this;
      }
      /**
       * <pre>
       ** explanatory text which may be useful to log on the client side 
       * </pre>
       *
       * <code>optional string message = 5;</code>
       * @param value The bytes for message to set.
       * @return This builder for chaining.
       */
      public Builder setMessageBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        message_ = value;
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }

      private int shortCircuitAccessVersion_ ;
      /**
       * <pre>
       ** If the server chooses to agree to the request of a client for
       * short-circuit access, it will send a response message with the relevant
       * file descriptors attached.
       *
       * In the body of the message, this version number will be set to the
       * specific version number of the block data that the client is about to
       * read.
       * </pre>
       *
       * <code>optional uint32 shortCircuitAccessVersion = 6;</code>
       * @return Whether the shortCircuitAccessVersion field is set.
       */
      @java.lang.Override
      public boolean hasShortCircuitAccessVersion() {
        return ((bitField0_ & 0x00000020) != 0);
      }
      /**
       * <pre>
       ** If the server chooses to agree to the request of a client for
       * short-circuit access, it will send a response message with the relevant
       * file descriptors attached.
       *
       * In the body of the message, this version number will be set to the
       * specific version number of the block data that the client is about to
       * read.
       * </pre>
       *
       * <code>optional uint32 shortCircuitAccessVersion = 6;</code>
       * @return The shortCircuitAccessVersion.
       */
      @java.lang.Override
      public int getShortCircuitAccessVersion() {
        return shortCircuitAccessVersion_;
      }
      /**
       * <pre>
       ** If the server chooses to agree to the request of a client for
       * short-circuit access, it will send a response message with the relevant
       * file descriptors attached.
       *
       * In the body of the message, this version number will be set to the
       * specific version number of the block data that the client is about to
       * read.
       * </pre>
       *
       * <code>optional uint32 shortCircuitAccessVersion = 6;</code>
       * @param value The shortCircuitAccessVersion to set.
       * @return This builder for chaining.
       */
      public Builder setShortCircuitAccessVersion(int value) {

        shortCircuitAccessVersion_ = value;
        bitField0_ |= 0x00000020;
        onChanged();
        return this;
      }
      /**
       * <pre>
       ** If the server chooses to agree to the request of a client for
       * short-circuit access, it will send a response message with the relevant
       * file descriptors attached.
       *
       * In the body of the message, this version number will be set to the
       * specific version number of the block data that the client is about to
       * read.
       * </pre>
       *
       * <code>optional uint32 shortCircuitAccessVersion = 6;</code>
       * @return This builder for chaining.
       */
      public Builder clearShortCircuitAccessVersion() {
        bitField0_ = (bitField0_ & ~0x00000020);
        shortCircuitAccessVersion_ = 0;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.BlockOpResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.BlockOpResponseProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<BlockOpResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<BlockOpResponseProto>() {
      @java.lang.Override
      public BlockOpResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<BlockOpResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<BlockOpResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
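
  /*
   * Illustrative sketch (not part of the generated API): a caller that has issued a block
   * operation typically parses the datanode's BlockOpResponseProto and checks the status
   * before continuing. The optional message field carries explanatory text worth logging on
   * the client side, and shortCircuitAccessVersion is only present when the server has agreed
   * to short-circuit access. The round-trip below uses only generated methods; the actual
   * HDFS wire framing on the data transfer stream is not shown and the error-handling policy
   * is an assumption for illustration only.
   *
   *   BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
   *       .setStatus(Status.SUCCESS)          // required field; build() would throw otherwise
   *       .setMessage("read ok")              // optional explanatory text
   *       .build();
   *   byte[] wire = response.toByteArray();
   *   BlockOpResponseProto parsed = BlockOpResponseProto.parseFrom(wire);
   *   if (parsed.getStatus() != Status.SUCCESS) {
   *     throw new java.io.IOException("block op failed: " + parsed.getStatus()
   *         + (parsed.hasMessage() ? " - " + parsed.getMessage() : ""));
   *   }
   *   if (parsed.hasShortCircuitAccessVersion()) {
   *     int blockDataVersion = parsed.getShortCircuitAccessVersion();
   *   }
   */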

  public interface ClientReadStatusProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.ClientReadStatusProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.Status status = 1;</code>
     * @return Whether the status field is set.
     */
    boolean hasStatus();
    /**
     * <code>required .hadoop.hdfs.Status status = 1;</code>
     * @return The status.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
  }
  /**
   * <pre>
   **
   * Message sent from the client to the DN after reading the entire
   * read request.
   * </pre>
   *
   * Protobuf type {@code hadoop.hdfs.ClientReadStatusProto}
   */
  public static final class ClientReadStatusProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.ClientReadStatusProto)
      ClientReadStatusProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use ClientReadStatusProto.newBuilder() to construct.
    private ClientReadStatusProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private ClientReadStatusProto() {
      status_ = 0;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new ClientReadStatusProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientReadStatusProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.Builder.class);
    }

    private int bitField0_;
    public static final int STATUS_FIELD_NUMBER = 1;
    private int status_ = 0;
    /**
     * <code>required .hadoop.hdfs.Status status = 1;</code>
     * @return Whether the status field is set.
     */
    @java.lang.Override public boolean hasStatus() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.Status status = 1;</code>
     * @return The status.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(status_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasStatus()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeEnum(1, status_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(1, status_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto) obj;

      if (hasStatus() != other.hasStatus()) return false;
      if (hasStatus()) {
        if (status_ != other.status_) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasStatus()) {
        hash = (37 * hash) + STATUS_FIELD_NUMBER;
        hash = (53 * hash) + status_;
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * <pre>
     **
     * Message sent from the client to the DN after reading the entire
     * read request.
     * </pre>
     *
     * Protobuf type {@code hadoop.hdfs.ClientReadStatusProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.ClientReadStatusProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientReadStatusProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        status_ = 0;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.status_ = status_;
          to_bitField0_ |= 0x00000001;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto.getDefaultInstance()) return this;
        if (other.hasStatus()) {
          setStatus(other.getStatus());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasStatus()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(1, tmpRaw);
                } else {
                  status_ = tmpRaw;
                  bitField0_ |= 0x00000001;
                }
                break;
              } // case 8
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int status_ = 0;
      /**
       * <code>required .hadoop.hdfs.Status status = 1;</code>
       * @return Whether the status field is set.
       */
      @java.lang.Override public boolean hasStatus() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.Status status = 1;</code>
       * @return The status.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(status_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
      }
      /**
       * <code>required .hadoop.hdfs.Status status = 1;</code>
       * @param value The status to set.
       * @return This builder for chaining.
       */
      public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        status_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.Status status = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearStatus() {
        bitField0_ = (bitField0_ & ~0x00000001);
        status_ = 0;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.ClientReadStatusProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.ClientReadStatusProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<ClientReadStatusProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<ClientReadStatusProto>() {
      @java.lang.Override
      public ClientReadStatusProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<ClientReadStatusProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<ClientReadStatusProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
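
  /*
   * Illustrative sketch (not part of the generated API): per the message comment above,
   * ClientReadStatusProto is what a client sends back to the datanode once it has consumed an
   * entire read request. A minimal sketch, assuming a hypothetical OutputStream "out" and
   * InputStream "in" already connected between client and datanode, and using the generated
   * writeDelimitedTo / parseDelimitedFrom pair for length-prefixed framing:
   *
   *   ClientReadStatusProto.newBuilder()
   *       .setStatus(Status.CHECKSUM_OK)   // or Status.SUCCESS / Status.ERROR_CHECKSUM
   *       .build()
   *       .writeDelimitedTo(out);
   *
   *   // Receiving side (e.g. in a test), reading the matching length-prefixed message:
   *   ClientReadStatusProto ack = ClientReadStatusProto.parseDelimitedFrom(in);
   *   boolean ok = ack.hasStatus() && ack.getStatus() == Status.CHECKSUM_OK;
   */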

  public interface DNTransferAckProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.DNTransferAckProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required .hadoop.hdfs.Status status = 1;</code>
     * @return Whether the status field is set.
     */
    boolean hasStatus();
    /**
     * <code>required .hadoop.hdfs.Status status = 1;</code>
     * @return The status.
     */
    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.DNTransferAckProto}
   */
  public static final class DNTransferAckProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.DNTransferAckProto)
      DNTransferAckProtoOrBuilder {
  private static final long serialVersionUID = 0L;
    // Use DNTransferAckProto.newBuilder() to construct.
    private DNTransferAckProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private DNTransferAckProto() {
      status_ = 0;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new DNTransferAckProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DNTransferAckProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DNTransferAckProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.Builder.class);
    }

    private int bitField0_;
    public static final int STATUS_FIELD_NUMBER = 1;
    private int status_ = 0;
    /**
     * <code>required .hadoop.hdfs.Status status = 1;</code>
     * @return Whether the status field is set.
     */
    @java.lang.Override public boolean hasStatus() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required .hadoop.hdfs.Status status = 1;</code>
     * @return The status.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(status_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasStatus()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeEnum(1, status_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(1, status_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto) obj;

      if (hasStatus() != other.hasStatus()) return false;
      if (hasStatus()) {
        if (status_ != other.status_) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasStatus()) {
        hash = (37 * hash) + STATUS_FIELD_NUMBER;
        hash = (53 * hash) + status_;
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.DNTransferAckProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.DNTransferAckProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DNTransferAckProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DNTransferAckProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        status_ = 0;
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_DNTransferAckProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.status_ = status_;
          to_bitField0_ |= 0x00000001;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto.getDefaultInstance()) return this;
        if (other.hasStatus()) {
          setStatus(other.getStatus());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasStatus()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(1, tmpRaw);
                } else {
                  status_ = tmpRaw;
                  bitField0_ |= 0x00000001;
                }
                break;
              } // case 8
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int status_ = 0;
      /**
       * <code>required .hadoop.hdfs.Status status = 1;</code>
       * @return Whether the status field is set.
       */
      @java.lang.Override public boolean hasStatus() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required .hadoop.hdfs.Status status = 1;</code>
       * @return The status.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status getStatus() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status result = org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.forNumber(status_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS : result;
      }
      /**
       * <code>required .hadoop.hdfs.Status status = 1;</code>
       * @param value The status to set.
       * @return This builder for chaining.
       */
      public Builder setStatus(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        status_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.Status status = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearStatus() {
        bitField0_ = (bitField0_ & ~0x00000001);
        status_ = 0;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.DNTransferAckProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.DNTransferAckProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<DNTransferAckProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<DNTransferAckProto>() {
      @java.lang.Override
      public DNTransferAckProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<DNTransferAckProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<DNTransferAckProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
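
  // ---------------------------------------------------------------------
  // Editorial usage sketch (not emitted by protoc): how a caller might
  // build and frame a DNTransferAckProto. The OutputStream parameter and
  // the helper method itself are illustrative only; writeDelimitedTo() is
  // the standard varint-length-prefixed protobuf framing helper.
  // ---------------------------------------------------------------------
  @SuppressWarnings("unused")
  private static void exampleWriteDnTransferAck(java.io.OutputStream out)
      throws java.io.IOException {
    DNTransferAckProto ack = DNTransferAckProto.newBuilder()
        .setStatus(Status.SUCCESS) // required field; build() fails if unset
        .build();
    ack.writeDelimitedTo(out);     // length-prefixed wire framing
  }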

  public interface OpBlockChecksumResponseProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpBlockChecksumResponseProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required uint32 bytesPerCrc = 1;</code>
     * @return Whether the bytesPerCrc field is set.
     */
    boolean hasBytesPerCrc();
    /**
     * <code>required uint32 bytesPerCrc = 1;</code>
     * @return The bytesPerCrc.
     */
    int getBytesPerCrc();

    /**
     * <code>required uint64 crcPerBlock = 2;</code>
     * @return Whether the crcPerBlock field is set.
     */
    boolean hasCrcPerBlock();
    /**
     * <code>required uint64 crcPerBlock = 2;</code>
     * @return The crcPerBlock.
     */
    long getCrcPerBlock();

    /**
     * <code>required bytes blockChecksum = 3;</code>
     * @return Whether the blockChecksum field is set.
     */
    boolean hasBlockChecksum();
    /**
     * <code>required bytes blockChecksum = 3;</code>
     * @return The blockChecksum.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString getBlockChecksum();

    /**
     * <code>optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;</code>
     * @return Whether the crcType field is set.
     */
    boolean hasCrcType();
    /**
     * <code>optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;</code>
     * @return The crcType.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getCrcType();

    /**
     * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;</code>
     * @return Whether the blockChecksumOptions field is set.
     */
    boolean hasBlockChecksumOptions();
    /**
     * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;</code>
     * @return The blockChecksumOptions.
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions();
    /**
     * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder();
  }
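  // Editorial note: as with every generated message, the *OrBuilder
  // interface above is implemented by both the immutable
  // OpBlockChecksumResponseProto and its Builder, so read-only callers can
  // accept either form through a single type.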
  /**
   * Protobuf type {@code hadoop.hdfs.OpBlockChecksumResponseProto}
   */
  public static final class OpBlockChecksumResponseProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.OpBlockChecksumResponseProto)
      OpBlockChecksumResponseProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use OpBlockChecksumResponseProto.newBuilder() to construct.
    private OpBlockChecksumResponseProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private OpBlockChecksumResponseProto() {
      blockChecksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      crcType_ = 0;
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new OpBlockChecksumResponseProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder.class);
    }

    private int bitField0_;
    public static final int BYTESPERCRC_FIELD_NUMBER = 1;
    private int bytesPerCrc_ = 0;
    /**
     * <code>required uint32 bytesPerCrc = 1;</code>
     * @return Whether the bytesPerCrc field is set.
     */
    @java.lang.Override
    public boolean hasBytesPerCrc() {
      return ((bitField0_ & 0x00000001) != 0);
    }
    /**
     * <code>required uint32 bytesPerCrc = 1;</code>
     * @return The bytesPerCrc.
     */
    @java.lang.Override
    public int getBytesPerCrc() {
      return bytesPerCrc_;
    }

    public static final int CRCPERBLOCK_FIELD_NUMBER = 2;
    private long crcPerBlock_ = 0L;
    /**
     * <code>required uint64 crcPerBlock = 2;</code>
     * @return Whether the crcPerBlock field is set.
     */
    @java.lang.Override
    public boolean hasCrcPerBlock() {
      return ((bitField0_ & 0x00000002) != 0);
    }
    /**
     * <code>required uint64 crcPerBlock = 2;</code>
     * @return The crcPerBlock.
     */
    @java.lang.Override
    public long getCrcPerBlock() {
      return crcPerBlock_;
    }

    public static final int BLOCKCHECKSUM_FIELD_NUMBER = 3;
    private org.apache.hadoop.thirdparty.protobuf.ByteString blockChecksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
    /**
     * <code>required bytes blockChecksum = 3;</code>
     * @return Whether the blockChecksum field is set.
     */
    @java.lang.Override
    public boolean hasBlockChecksum() {
      return ((bitField0_ & 0x00000004) != 0);
    }
    /**
     * <code>required bytes blockChecksum = 3;</code>
     * @return The blockChecksum.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockChecksum() {
      return blockChecksum_;
    }

    public static final int CRCTYPE_FIELD_NUMBER = 4;
    private int crcType_ = 0;
    /**
     * <code>optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;</code>
     * @return Whether the crcType field is set.
     */
    @java.lang.Override public boolean hasCrcType() {
      return ((bitField0_ & 0x00000008) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;</code>
     * @return The crcType.
     */
    @java.lang.Override public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getCrcType() {
      org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.forNumber(crcType_);
      return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL : result;
    }

    public static final int BLOCKCHECKSUMOPTIONS_FIELD_NUMBER = 5;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto blockChecksumOptions_;
    /**
     * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;</code>
     * @return Whether the blockChecksumOptions field is set.
     */
    @java.lang.Override
    public boolean hasBlockChecksumOptions() {
      return ((bitField0_ & 0x00000010) != 0);
    }
    /**
     * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;</code>
     * @return The blockChecksumOptions.
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions() {
      return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
    }
    /**
     * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;</code>
     */
    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder() {
      return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasBytesPerCrc()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasCrcPerBlock()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasBlockChecksum()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        output.writeUInt32(1, bytesPerCrc_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        output.writeUInt64(2, crcPerBlock_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        output.writeBytes(3, blockChecksum_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        output.writeEnum(4, crcType_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        output.writeMessage(5, getBlockChecksumOptions());
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt32Size(1, bytesPerCrc_);
      }
      if (((bitField0_ & 0x00000002) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeUInt64Size(2, crcPerBlock_);
      }
      if (((bitField0_ & 0x00000004) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeBytesSize(3, blockChecksum_);
      }
      if (((bitField0_ & 0x00000008) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeEnumSize(4, crcType_);
      }
      if (((bitField0_ & 0x00000010) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.CodedOutputStream
          .computeMessageSize(5, getBlockChecksumOptions());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto) obj;

      if (hasBytesPerCrc() != other.hasBytesPerCrc()) return false;
      if (hasBytesPerCrc()) {
        if (getBytesPerCrc()
            != other.getBytesPerCrc()) return false;
      }
      if (hasCrcPerBlock() != other.hasCrcPerBlock()) return false;
      if (hasCrcPerBlock()) {
        if (getCrcPerBlock()
            != other.getCrcPerBlock()) return false;
      }
      if (hasBlockChecksum() != other.hasBlockChecksum()) return false;
      if (hasBlockChecksum()) {
        if (!getBlockChecksum()
            .equals(other.getBlockChecksum())) return false;
      }
      if (hasCrcType() != other.hasCrcType()) return false;
      if (hasCrcType()) {
        if (crcType_ != other.crcType_) return false;
      }
      if (hasBlockChecksumOptions() != other.hasBlockChecksumOptions()) return false;
      if (hasBlockChecksumOptions()) {
        if (!getBlockChecksumOptions()
            .equals(other.getBlockChecksumOptions())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasBytesPerCrc()) {
        hash = (37 * hash) + BYTESPERCRC_FIELD_NUMBER;
        hash = (53 * hash) + getBytesPerCrc();
      }
      if (hasCrcPerBlock()) {
        hash = (37 * hash) + CRCPERBLOCK_FIELD_NUMBER;
        hash = (53 * hash) + org.apache.hadoop.thirdparty.protobuf.Internal.hashLong(
            getCrcPerBlock());
      }
      if (hasBlockChecksum()) {
        hash = (37 * hash) + BLOCKCHECKSUM_FIELD_NUMBER;
        hash = (53 * hash) + getBlockChecksum().hashCode();
      }
      if (hasCrcType()) {
        hash = (37 * hash) + CRCTYPE_FIELD_NUMBER;
        hash = (53 * hash) + crcType_;
      }
      if (hasBlockChecksumOptions()) {
        hash = (37 * hash) + BLOCKCHECKSUMOPTIONS_FIELD_NUMBER;
        hash = (53 * hash) + getBlockChecksumOptions().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.OpBlockChecksumResponseProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpBlockChecksumResponseProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
                .alwaysUseFieldBuilders) {
          getBlockChecksumOptionsFieldBuilder();
        }
      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        bytesPerCrc_ = 0;
        crcPerBlock_ = 0L;
        blockChecksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
        crcType_ = 0;
        blockChecksumOptions_ = null;
        if (blockChecksumOptionsBuilder_ != null) {
          blockChecksumOptionsBuilder_.dispose();
          blockChecksumOptionsBuilder_ = null;
        }
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.bytesPerCrc_ = bytesPerCrc_;
          to_bitField0_ |= 0x00000001;
        }
        if (((from_bitField0_ & 0x00000002) != 0)) {
          result.crcPerBlock_ = crcPerBlock_;
          to_bitField0_ |= 0x00000002;
        }
        if (((from_bitField0_ & 0x00000004) != 0)) {
          result.blockChecksum_ = blockChecksum_;
          to_bitField0_ |= 0x00000004;
        }
        if (((from_bitField0_ & 0x00000008) != 0)) {
          result.crcType_ = crcType_;
          to_bitField0_ |= 0x00000008;
        }
        if (((from_bitField0_ & 0x00000010) != 0)) {
          result.blockChecksumOptions_ = blockChecksumOptionsBuilder_ == null
              ? blockChecksumOptions_
              : blockChecksumOptionsBuilder_.build();
          to_bitField0_ |= 0x00000010;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto.getDefaultInstance()) return this;
        if (other.hasBytesPerCrc()) {
          setBytesPerCrc(other.getBytesPerCrc());
        }
        if (other.hasCrcPerBlock()) {
          setCrcPerBlock(other.getCrcPerBlock());
        }
        if (other.hasBlockChecksum()) {
          setBlockChecksum(other.getBlockChecksum());
        }
        if (other.hasCrcType()) {
          setCrcType(other.getCrcType());
        }
        if (other.hasBlockChecksumOptions()) {
          mergeBlockChecksumOptions(other.getBlockChecksumOptions());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasBytesPerCrc()) {
          return false;
        }
        if (!hasCrcPerBlock()) {
          return false;
        }
        if (!hasBlockChecksum()) {
          return false;
        }
        return true;
      }

      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 8: {
                bytesPerCrc_ = input.readUInt32();
                bitField0_ |= 0x00000001;
                break;
              } // case 8
              case 16: {
                crcPerBlock_ = input.readUInt64();
                bitField0_ |= 0x00000002;
                break;
              } // case 16
              case 26: {
                blockChecksum_ = input.readBytes();
                bitField0_ |= 0x00000004;
                break;
              } // case 26
              case 32: {
                int tmpRaw = input.readEnum();
                org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto tmpValue =
                    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.forNumber(tmpRaw);
                if (tmpValue == null) {
                  mergeUnknownVarintField(4, tmpRaw);
                } else {
                  crcType_ = tmpRaw;
                  bitField0_ |= 0x00000008;
                }
                break;
              } // case 32
              case 42: {
                input.readMessage(
                    getBlockChecksumOptionsFieldBuilder().getBuilder(),
                    extensionRegistry);
                bitField0_ |= 0x00000010;
                break;
              } // case 42
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private int bytesPerCrc_;
      /**
       * <code>required uint32 bytesPerCrc = 1;</code>
       * @return Whether the bytesPerCrc field is set.
       */
      @java.lang.Override
      public boolean hasBytesPerCrc() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required uint32 bytesPerCrc = 1;</code>
       * @return The bytesPerCrc.
       */
      @java.lang.Override
      public int getBytesPerCrc() {
        return bytesPerCrc_;
      }
      /**
       * <code>required uint32 bytesPerCrc = 1;</code>
       * @param value The bytesPerCrc to set.
       * @return This builder for chaining.
       */
      public Builder setBytesPerCrc(int value) {
        bytesPerCrc_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required uint32 bytesPerCrc = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearBytesPerCrc() {
        bitField0_ = (bitField0_ & ~0x00000001);
        bytesPerCrc_ = 0;
        onChanged();
        return this;
      }

      private long crcPerBlock_;
      /**
       * <code>required uint64 crcPerBlock = 2;</code>
       * @return Whether the crcPerBlock field is set.
       */
      @java.lang.Override
      public boolean hasCrcPerBlock() {
        return ((bitField0_ & 0x00000002) != 0);
      }
      /**
       * <code>required uint64 crcPerBlock = 2;</code>
       * @return The crcPerBlock.
       */
      @java.lang.Override
      public long getCrcPerBlock() {
        return crcPerBlock_;
      }
      /**
       * <code>required uint64 crcPerBlock = 2;</code>
       * @param value The crcPerBlock to set.
       * @return This builder for chaining.
       */
      public Builder setCrcPerBlock(long value) {
        crcPerBlock_ = value;
        bitField0_ |= 0x00000002;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 crcPerBlock = 2;</code>
       * @return This builder for chaining.
       */
      public Builder clearCrcPerBlock() {
        bitField0_ = (bitField0_ & ~0x00000002);
        crcPerBlock_ = 0L;
        onChanged();
        return this;
      }

      private org.apache.hadoop.thirdparty.protobuf.ByteString blockChecksum_ = org.apache.hadoop.thirdparty.protobuf.ByteString.EMPTY;
      /**
       * <code>required bytes blockChecksum = 3;</code>
       * @return Whether the blockChecksum field is set.
       */
      @java.lang.Override
      public boolean hasBlockChecksum() {
        return ((bitField0_ & 0x00000004) != 0);
      }
      /**
       * <code>required bytes blockChecksum = 3;</code>
       * @return The blockChecksum.
       */
      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.ByteString getBlockChecksum() {
        return blockChecksum_;
      }
      /**
       * <code>required bytes blockChecksum = 3;</code>
       * @param value The blockChecksum to set.
       * @return This builder for chaining.
       */
      public Builder setBlockChecksum(org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        blockChecksum_ = value;
        bitField0_ |= 0x00000004;
        onChanged();
        return this;
      }
      /**
       * <code>required bytes blockChecksum = 3;</code>
       * @return This builder for chaining.
       */
      public Builder clearBlockChecksum() {
        bitField0_ = (bitField0_ & ~0x00000004);
        blockChecksum_ = getDefaultInstance().getBlockChecksum();
        onChanged();
        return this;
      }

      private int crcType_ = 0;
      /**
       * <code>optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;</code>
       * @return Whether the crcType field is set.
       */
      @java.lang.Override public boolean hasCrcType() {
        return ((bitField0_ & 0x00000008) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;</code>
       * @return The crcType.
       */
      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto getCrcType() {
        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto result = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.forNumber(crcType_);
        return result == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL : result;
      }
      /**
       * <code>optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;</code>
       * @param value The crcType to set.
       * @return This builder for chaining.
       */
      public Builder setCrcType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000008;
        crcType_ = value.getNumber();
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.ChecksumTypeProto crcType = 4;</code>
       * @return This builder for chaining.
       */
      public Builder clearCrcType() {
        bitField0_ = (bitField0_ & ~0x00000008);
        crcType_ = 0;
        onChanged();
        return this;
      }

      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto blockChecksumOptions_;
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder> blockChecksumOptionsBuilder_;
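      // Editorial note: until getBlockChecksumOptionsFieldBuilder() below is
      // first invoked, the message value lives in blockChecksumOptions_;
      // once the SingleFieldBuilderV3 is created it takes ownership and
      // blockChecksumOptions_ is set to null.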
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;</code>
       * @return Whether the blockChecksumOptions field is set.
       */
      public boolean hasBlockChecksumOptions() {
        return ((bitField0_ & 0x00000010) != 0);
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;</code>
       * @return The blockChecksumOptions.
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto getBlockChecksumOptions() {
        if (blockChecksumOptionsBuilder_ == null) {
          return blockChecksumOptions_ == null ? org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
        } else {
          return blockChecksumOptionsBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;</code>
       */
      public Builder setBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
        if (blockChecksumOptionsBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          blockChecksumOptions_ = value;
        } else {
          blockChecksumOptionsBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;</code>
       */
      public Builder setBlockChecksumOptions(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder builderForValue) {
        if (blockChecksumOptionsBuilder_ == null) {
          blockChecksumOptions_ = builderForValue.build();
        } else {
          blockChecksumOptionsBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000010;
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;</code>
       */
      public Builder mergeBlockChecksumOptions(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto value) {
        if (blockChecksumOptionsBuilder_ == null) {
          if (((bitField0_ & 0x00000010) != 0) &&
            blockChecksumOptions_ != null &&
            blockChecksumOptions_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance()) {
            getBlockChecksumOptionsBuilder().mergeFrom(value);
          } else {
            blockChecksumOptions_ = value;
          }
        } else {
          blockChecksumOptionsBuilder_.mergeFrom(value);
        }
        if (blockChecksumOptions_ != null) {
          bitField0_ |= 0x00000010;
          onChanged();
        }
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;</code>
       */
      public Builder clearBlockChecksumOptions() {
        bitField0_ = (bitField0_ & ~0x00000010);
        blockChecksumOptions_ = null;
        if (blockChecksumOptionsBuilder_ != null) {
          blockChecksumOptionsBuilder_.dispose();
          blockChecksumOptionsBuilder_ = null;
        }
        onChanged();
        return this;
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder getBlockChecksumOptionsBuilder() {
        bitField0_ |= 0x00000010;
        onChanged();
        return getBlockChecksumOptionsFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder getBlockChecksumOptionsOrBuilder() {
        if (blockChecksumOptionsBuilder_ != null) {
          return blockChecksumOptionsBuilder_.getMessageOrBuilder();
        } else {
          return blockChecksumOptions_ == null ?
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.getDefaultInstance() : blockChecksumOptions_;
        }
      }
      /**
       * <code>optional .hadoop.hdfs.BlockChecksumOptionsProto blockChecksumOptions = 5;</code>
       */
      private org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder> 
          getBlockChecksumOptionsFieldBuilder() {
        if (blockChecksumOptionsBuilder_ == null) {
          blockChecksumOptionsBuilder_ = new org.apache.hadoop.thirdparty.protobuf.SingleFieldBuilderV3<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockChecksumOptionsProtoOrBuilder>(
                  getBlockChecksumOptions(),
                  getParentForChildren(),
                  isClean());
          blockChecksumOptions_ = null;
        }
        return blockChecksumOptionsBuilder_;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpBlockChecksumResponseProto)
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.OpBlockChecksumResponseProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockChecksumResponseProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpBlockChecksumResponseProto>() {
      @java.lang.Override
      public OpBlockChecksumResponseProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockChecksumResponseProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<OpBlockChecksumResponseProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }
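
  // ---------------------------------------------------------------------
  // Editorial usage sketch (not emitted by protoc): assembling a
  // block-checksum response and round-tripping it through the wire format.
  // The numeric values and the placeholder digest are illustrative only;
  // CHECKSUM_CRC32C is assumed to be defined in HdfsProtos.ChecksumTypeProto.
  // ---------------------------------------------------------------------
  @SuppressWarnings("unused")
  private static OpBlockChecksumResponseProto exampleChecksumResponseRoundTrip()
      throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
    OpBlockChecksumResponseProto reply = OpBlockChecksumResponseProto.newBuilder()
        .setBytesPerCrc(512)          // required
        .setCrcPerBlock(262144L)      // required
        .setBlockChecksum(            // required; placeholder 16-byte digest
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFrom(new byte[16]))
        .setCrcType(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos
            .ChecksumTypeProto.CHECKSUM_CRC32C) // optional
        .build();                     // throws if a required field is missing
    // Serialize and parse back to exercise the generated parseFrom(byte[]).
    return OpBlockChecksumResponseProto.parseFrom(reply.toByteArray());
  }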

  public interface OpCustomProtoOrBuilder extends
      // @@protoc_insertion_point(interface_extends:hadoop.hdfs.OpCustomProto)
      org.apache.hadoop.thirdparty.protobuf.MessageOrBuilder {

    /**
     * <code>required string customId = 1;</code>
     * @return Whether the customId field is set.
     */
    boolean hasCustomId();
    /**
     * <code>required string customId = 1;</code>
     * @return The customId.
     */
    java.lang.String getCustomId();
    /**
     * <code>required string customId = 1;</code>
     * @return The bytes for customId.
     */
    org.apache.hadoop.thirdparty.protobuf.ByteString
        getCustomIdBytes();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.OpCustomProto}
   */
  public static final class OpCustomProto extends
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3 implements
      // @@protoc_insertion_point(message_implements:hadoop.hdfs.OpCustomProto)
      OpCustomProtoOrBuilder {
    private static final long serialVersionUID = 0L;
    // Use OpCustomProto.newBuilder() to construct.
    private OpCustomProto(org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<?> builder) {
      super(builder);
    }
    private OpCustomProto() {
      customId_ = "";
    }

    @java.lang.Override
    @SuppressWarnings({"unused"})
    protected java.lang.Object newInstance(
        UnusedPrivateParameter unused) {
      return new OpCustomProto();
    }

    public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCustomProto_descriptor;
    }

    @java.lang.Override
    protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCustomProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.Builder.class);
    }

    private int bitField0_;
    public static final int CUSTOMID_FIELD_NUMBER = 1;
    @SuppressWarnings("serial")
    private volatile java.lang.Object customId_ = "";
    /**
     * <code>required string customId = 1;</code>
     * @return Whether the customId field is set.
     */
    @java.lang.Override
    public boolean hasCustomId() {
      return ((bitField0_ & 0x00000001) != 0);
    }
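    // Editorial note: the customId field is stored internally as either a
    // String or a ByteString. getCustomId() lazily decodes the ByteString
    // form and caches the String only when it is valid UTF-8;
    // getCustomIdBytes() performs the reverse conversion and caches the
    // ByteString.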
    /**
     * <code>required string customId = 1;</code>
     * @return The customId.
     */
    @java.lang.Override
    public java.lang.String getCustomId() {
      java.lang.Object ref = customId_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        org.apache.hadoop.thirdparty.protobuf.ByteString bs = 
            (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          customId_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string customId = 1;</code>
     * @return The bytes for customId.
     */
    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.ByteString
        getCustomIdBytes() {
      java.lang.Object ref = customId_;
      if (ref instanceof java.lang.String) {
        org.apache.hadoop.thirdparty.protobuf.ByteString b = 
            org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        customId_ = b;
        return b;
      } else {
        return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
      }
    }

    private byte memoizedIsInitialized = -1;
    @java.lang.Override
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized == 1) return true;
      if (isInitialized == 0) return false;

      if (!hasCustomId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    @java.lang.Override
    public void writeTo(org.apache.hadoop.thirdparty.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      if (((bitField0_ & 0x00000001) != 0)) {
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.writeString(output, 1, customId_);
      }
      getUnknownFields().writeTo(output);
    }

    @java.lang.Override
    public int getSerializedSize() {
      int size = memoizedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) != 0)) {
        size += org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.computeStringSize(1, customId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSize = size;
      return size;
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto other = (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto) obj;

      if (hasCustomId() != other.hasCustomId()) return false;
      if (hasCustomId()) {
        if (!getCustomId()
            .equals(other.getCustomId())) return false;
      }
      if (!getUnknownFields().equals(other.getUnknownFields())) return false;
      return true;
    }

    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptor().hashCode();
      if (hasCustomId()) {
        hash = (37 * hash) + CUSTOMID_FIELD_NUMBER;
        hash = (53 * hash) + getCustomId().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
        java.nio.ByteBuffer data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
        java.nio.ByteBuffer data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.ByteString data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(byte[] data)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
        byte[] data,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input);
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseDelimitedFrom(
        java.io.InputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input);
    }
    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto parseFrom(
        org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
        org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3
          .parseWithIOException(PARSER, input, extensionRegistry);
    }

    @java.lang.Override
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder() {
      return DEFAULT_INSTANCE.toBuilder();
    }
    public static Builder newBuilder(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto prototype) {
      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
    }
    @java.lang.Override
    public Builder toBuilder() {
      return this == DEFAULT_INSTANCE
          ? new Builder() : new Builder().mergeFrom(this);
    }

    @java.lang.Override
    protected Builder newBuilderForType(
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.OpCustomProto}
     */
    public static final class Builder extends
        org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.Builder<Builder> implements
        // @@protoc_insertion_point(builder_implements:hadoop.hdfs.OpCustomProto)
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProtoOrBuilder {
      public static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCustomProto_descriptor;
      }

      @java.lang.Override
      protected org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCustomProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.class, org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.newBuilder()
      private Builder() {

      }

      private Builder(
          org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.BuilderParent parent) {
        super(parent);

      }
      @java.lang.Override
      public Builder clear() {
        super.clear();
        bitField0_ = 0;
        customId_ = "";
        return this;
      }

      @java.lang.Override
      public org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.internal_static_hadoop_hdfs_OpCustomProto_descriptor;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.getDefaultInstance();
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto build() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      @java.lang.Override
      public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto buildPartial() {
        org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto result = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto(this);
        if (bitField0_ != 0) { buildPartial0(result); }
        onBuilt();
        return result;
      }

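      // Descriptive note (hand-written, not protoc output): buildPartial0 copies
      // only the fields whose presence bits are set in the builder's bitField0_
      // into the result message, and mirrors those bits onto the message's own
      // bitField0_ so hasCustomId() reflects what was actually set.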
      private void buildPartial0(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto result) {
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) != 0)) {
          result.customId_ = customId_;
          to_bitField0_ |= 0x00000001;
        }
        result.bitField0_ |= to_bitField0_;
      }

      @java.lang.Override
      public Builder clone() {
        return super.clone();
      }
      @java.lang.Override
      public Builder setField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.setField(field, value);
      }
      @java.lang.Override
      public Builder clearField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field) {
        return super.clearField(field);
      }
      @java.lang.Override
      public Builder clearOneof(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.OneofDescriptor oneof) {
        return super.clearOneof(oneof);
      }
      @java.lang.Override
      public Builder setRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          int index, java.lang.Object value) {
        return super.setRepeatedField(field, index, value);
      }
      @java.lang.Override
      public Builder addRepeatedField(
          org.apache.hadoop.thirdparty.protobuf.Descriptors.FieldDescriptor field,
          java.lang.Object value) {
        return super.addRepeatedField(field, value);
      }
      @java.lang.Override
      public Builder mergeFrom(org.apache.hadoop.thirdparty.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto) {
          return mergeFrom((org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto other) {
        if (other == org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto.getDefaultInstance()) return this;
        if (other.hasCustomId()) {
          customId_ = other.customId_;
          bitField0_ |= 0x00000001;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        onChanged();
        return this;
      }

      @java.lang.Override
      public final boolean isInitialized() {
        if (!hasCustomId()) {
          return false;
        }
        return true;
      }

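      // Descriptive note (hand-written, not protoc output): the mergeFrom overload
      // below reads the wire format tag by tag. Tag 0 ends the stream, tag 10
      // (field 1, length-delimited) carries customId, and any other tag is routed
      // to parseUnknownField, which returns false on an end-group tag to stop.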
      @java.lang.Override
      public Builder mergeFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        if (extensionRegistry == null) {
          throw new java.lang.NullPointerException();
        }
        try {
          boolean done = false;
          while (!done) {
            int tag = input.readTag();
            switch (tag) {
              case 0:
                done = true;
                break;
              case 10: {
                customId_ = input.readBytes();
                bitField0_ |= 0x00000001;
                break;
              } // case 10
              default: {
                if (!super.parseUnknownField(input, extensionRegistry, tag)) {
                  done = true; // was an endgroup tag
                }
                break;
              } // default:
            } // switch (tag)
          } // while (!done)
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.unwrapIOException();
        } finally {
          onChanged();
        } // finally
        return this;
      }
      private int bitField0_;

      private java.lang.Object customId_ = "";
      /**
       * <code>required string customId = 1;</code>
       * @return Whether the customId field is set.
       */
      public boolean hasCustomId() {
        return ((bitField0_ & 0x00000001) != 0);
      }
      /**
       * <code>required string customId = 1;</code>
       * @return The customId.
       */
      public java.lang.String getCustomId() {
        java.lang.Object ref = customId_;
        if (!(ref instanceof java.lang.String)) {
          org.apache.hadoop.thirdparty.protobuf.ByteString bs =
              (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
          java.lang.String s = bs.toStringUtf8();
          if (bs.isValidUtf8()) {
            customId_ = s;
          }
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string customId = 1;</code>
       * @return The bytes for customId.
       */
      public org.apache.hadoop.thirdparty.protobuf.ByteString
          getCustomIdBytes() {
        java.lang.Object ref = customId_;
        if (ref instanceof String) {
          org.apache.hadoop.thirdparty.protobuf.ByteString b = 
              org.apache.hadoop.thirdparty.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          customId_ = b;
          return b;
        } else {
          return (org.apache.hadoop.thirdparty.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string customId = 1;</code>
       * @param value The customId to set.
       * @return This builder for chaining.
       */
      public Builder setCustomId(
          java.lang.String value) {
        if (value == null) { throw new NullPointerException(); }
        customId_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      /**
       * <code>required string customId = 1;</code>
       * @return This builder for chaining.
       */
      public Builder clearCustomId() {
        customId_ = getDefaultInstance().getCustomId();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      /**
       * <code>required string customId = 1;</code>
       * @param value The bytes for customId to set.
       * @return This builder for chaining.
       */
      public Builder setCustomIdBytes(
          org.apache.hadoop.thirdparty.protobuf.ByteString value) {
        if (value == null) { throw new NullPointerException(); }
        customId_ = value;
        bitField0_ |= 0x00000001;
        onChanged();
        return this;
      }
      @java.lang.Override
      public final Builder setUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.setUnknownFields(unknownFields);
      }

      @java.lang.Override
      public final Builder mergeUnknownFields(
          final org.apache.hadoop.thirdparty.protobuf.UnknownFieldSet unknownFields) {
        return super.mergeUnknownFields(unknownFields);
      }


      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.OpCustomProto)
    }
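
    // Usage sketch (illustrative only, not part of the generated API surface):
    // a message is normally created through the Builder above and round-tripped
    // with the standard serialization methods, e.g.
    //
    //   OpCustomProto op = OpCustomProto.newBuilder()
    //       .setCustomId("my-op")   // required field; build() throws if unset
    //       .build();
    //   byte[] wire = op.toByteArray();
    //   OpCustomProto parsed = OpCustomProto.parseFrom(wire);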

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.OpCustomProto)
    private static final org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto DEFAULT_INSTANCE;
    static {
      DEFAULT_INSTANCE = new org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto();
    }

    public static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto getDefaultInstance() {
      return DEFAULT_INSTANCE;
    }

    @java.lang.Deprecated public static final org.apache.hadoop.thirdparty.protobuf.Parser<OpCustomProto>
        PARSER = new org.apache.hadoop.thirdparty.protobuf.AbstractParser<OpCustomProto>() {
      @java.lang.Override
      public OpCustomProto parsePartialFrom(
          org.apache.hadoop.thirdparty.protobuf.CodedInputStream input,
          org.apache.hadoop.thirdparty.protobuf.ExtensionRegistryLite extensionRegistry)
          throws org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException {
        Builder builder = newBuilder();
        try {
          builder.mergeFrom(input, extensionRegistry);
        } catch (org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException e) {
          throw e.setUnfinishedMessage(builder.buildPartial());
        } catch (org.apache.hadoop.thirdparty.protobuf.UninitializedMessageException e) {
          throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
        } catch (java.io.IOException e) {
          throw new org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException(e)
              .setUnfinishedMessage(builder.buildPartial());
        }
        return builder.buildPartial();
      }
    };

    public static org.apache.hadoop.thirdparty.protobuf.Parser<OpCustomProto> parser() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.thirdparty.protobuf.Parser<OpCustomProto> getParserForType() {
      return PARSER;
    }

    @java.lang.Override
    public org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCustomProto getDefaultInstanceForType() {
      return DEFAULT_INSTANCE;
    }

  }

  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_HandshakeSecretProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_HandshakeSecretProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_BaseHeaderProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_BaseHeaderProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_DataTransferTraceInfoProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_DataTransferTraceInfoProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ClientOperationHeaderProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_ClientOperationHeaderProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_CachingStrategyProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_CachingStrategyProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_OpReadBlockProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_OpReadBlockProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ChecksumProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_ChecksumProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_OpWriteBlockProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_OpTransferBlockProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_OpTransferBlockProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_OpReplaceBlockProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_OpReplaceBlockProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_OpCopyBlockProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_OpCopyBlockProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_OpBlockChecksumProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_OpBlockChecksumProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_ShortCircuitShmIdProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_PacketHeaderProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_PacketHeaderProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_PipelineAckProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_PipelineAckProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_BlockOpResponseProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_ClientReadStatusProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_DNTransferAckProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_DNTransferAckProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_fieldAccessorTable;
  private static final org.apache.hadoop.thirdparty.protobuf.Descriptors.Descriptor
    internal_static_hadoop_hdfs_OpCustomProto_descriptor;
  private static final 
    org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable
      internal_static_hadoop_hdfs_OpCustomProto_fieldAccessorTable;

  public static org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
      getDescriptor() {
    return descriptor;
  }
  private static  org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
      descriptor;
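  // Static initializer: rebuilds the datatransfer.proto FileDescriptor from the
  // serialized descriptor data below (with Security.proto and hdfs.proto as
  // dependencies) and wires up the per-message descriptors and field accessor
  // tables declared above.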
  static {
    java.lang.String[] descriptorData = {
      "\n\022datatransfer.proto\022\013hadoop.hdfs\032\016Secur" +
      "ity.proto\032\nhdfs.proto\"\373\002\n!DataTransferEn" +
      "cryptorMessageProto\022Z\n\006status\030\001 \002(\0162J.ha" +
      "doop.hdfs.DataTransferEncryptorMessagePr" +
      "oto.DataTransferEncryptorStatus\022\017\n\007paylo" +
      "ad\030\002 \001(\014\022\017\n\007message\030\003 \001(\t\0224\n\014cipherOptio" +
      "n\030\004 \003(\0132\036.hadoop.hdfs.CipherOptionProto\022" +
      ":\n\017handshakeSecret\030\005 \001(\0132!.hadoop.hdfs.H" +
      "andshakeSecretProto\022\030\n\020accessTokenError\030" +
      "\006 \001(\010\"L\n\033DataTransferEncryptorStatus\022\013\n\007" +
      "SUCCESS\020\000\022\025\n\021ERROR_UNKNOWN_KEY\020\001\022\t\n\005ERRO" +
      "R\020\002\"4\n\024HandshakeSecretProto\022\016\n\006secret\030\001 " +
      "\002(\014\022\014\n\004bpid\030\002 \002(\t\"\247\001\n\017BaseHeaderProto\022.\n" +
      "\005block\030\001 \002(\0132\037.hadoop.hdfs.ExtendedBlock" +
      "Proto\022(\n\005token\030\002 \001(\0132\031.hadoop.common.Tok" +
      "enProto\022:\n\ttraceInfo\030\003 \001(\0132\'.hadoop.hdfs" +
      ".DataTransferTraceInfoProto\"T\n\032DataTrans" +
      "ferTraceInfoProto\022\017\n\007traceId\030\001 \001(\004\022\020\n\010pa" +
      "rentId\030\002 \001(\004\022\023\n\013spanContext\030\003 \001(\014\"b\n\032Cli" +
      "entOperationHeaderProto\0220\n\nbaseHeader\030\001 " +
      "\002(\0132\034.hadoop.hdfs.BaseHeaderProto\022\022\n\ncli" +
      "entName\030\002 \002(\t\"=\n\024CachingStrategyProto\022\022\n" +
      "\ndropBehind\030\001 \001(\010\022\021\n\treadahead\030\002 \001(\003\"\301\001\n" +
      "\020OpReadBlockProto\0227\n\006header\030\001 \002(\0132\'.hado" +
      "op.hdfs.ClientOperationHeaderProto\022\016\n\006of" +
      "fset\030\002 \002(\004\022\013\n\003len\030\003 \002(\004\022\033\n\rsendChecksums" +
      "\030\004 \001(\010:\004true\022:\n\017cachingStrategy\030\005 \001(\0132!." +
      "hadoop.hdfs.CachingStrategyProto\"W\n\rChec" +
      "ksumProto\022,\n\004type\030\001 \002(\0162\036.hadoop.hdfs.Ch" +
      "ecksumTypeProto\022\030\n\020bytesPerChecksum\030\002 \002(" +
      "\r\"\305\007\n\021OpWriteBlockProto\0227\n\006header\030\001 \002(\0132" +
      "\'.hadoop.hdfs.ClientOperationHeaderProto" +
      "\022/\n\007targets\030\002 \003(\0132\036.hadoop.hdfs.Datanode" +
      "InfoProto\022.\n\006source\030\003 \001(\0132\036.hadoop.hdfs." +
      "DatanodeInfoProto\022D\n\005stage\030\004 \002(\01625.hadoo" +
      "p.hdfs.OpWriteBlockProto.BlockConstructi" +
      "onStage\022\024\n\014pipelineSize\030\005 \002(\r\022\024\n\014minByte" +
      "sRcvd\030\006 \002(\004\022\024\n\014maxBytesRcvd\030\007 \002(\004\022\035\n\025lat" +
      "estGenerationStamp\030\010 \002(\004\0225\n\021requestedChe" +
      "cksum\030\t \002(\0132\032.hadoop.hdfs.ChecksumProto\022" +
      ":\n\017cachingStrategy\030\n \001(\0132!.hadoop.hdfs.C" +
      "achingStrategyProto\0228\n\013storageType\030\013 \001(\016" +
      "2\035.hadoop.hdfs.StorageTypeProto:\004DISK\0229\n" +
      "\022targetStorageTypes\030\014 \003(\0162\035.hadoop.hdfs." +
      "StorageTypeProto\022\037\n\020allowLazyPersist\030\r \001" +
      "(\010:\005false\022\026\n\007pinning\030\016 \001(\010:\005false\022\026\n\016tar" +
      "getPinnings\030\017 \003(\010\022\021\n\tstorageId\030\020 \001(\t\022\030\n\020" +
      "targetStorageIds\030\021 \003(\t\"\210\002\n\026BlockConstruc" +
      "tionStage\022\031\n\025PIPELINE_SETUP_APPEND\020\000\022\"\n\036" +
      "PIPELINE_SETUP_APPEND_RECOVERY\020\001\022\022\n\016DATA" +
      "_STREAMING\020\002\022%\n!PIPELINE_SETUP_STREAMING" +
      "_RECOVERY\020\003\022\022\n\016PIPELINE_CLOSE\020\004\022\033\n\027PIPEL" +
      "INE_CLOSE_RECOVERY\020\005\022\031\n\025PIPELINE_SETUP_C" +
      "REATE\020\006\022\020\n\014TRANSFER_RBW\020\007\022\026\n\022TRANSFER_FI" +
      "NALIZED\020\010\"\325\001\n\024OpTransferBlockProto\0227\n\006he" +
      "ader\030\001 \002(\0132\'.hadoop.hdfs.ClientOperation" +
      "HeaderProto\022/\n\007targets\030\002 \003(\0132\036.hadoop.hd" +
      "fs.DatanodeInfoProto\0229\n\022targetStorageTyp" +
      "es\030\003 \003(\0162\035.hadoop.hdfs.StorageTypeProto\022" +
      "\030\n\020targetStorageIds\030\004 \003(\t\"\321\001\n\023OpReplaceB" +
      "lockProto\022,\n\006header\030\001 \002(\0132\034.hadoop.hdfs." +
      "BaseHeaderProto\022\017\n\007delHint\030\002 \002(\t\022.\n\006sour" +
      "ce\030\003 \002(\0132\036.hadoop.hdfs.DatanodeInfoProto" +
      "\0228\n\013storageType\030\004 \001(\0162\035.hadoop.hdfs.Stor" +
      "ageTypeProto:\004DISK\022\021\n\tstorageId\030\005 \001(\t\"@\n" +
      "\020OpCopyBlockProto\022,\n\006header\030\001 \002(\0132\034.hado" +
      "op.hdfs.BaseHeaderProto\"\212\001\n\024OpBlockCheck" +
      "sumProto\022,\n\006header\030\001 \002(\0132\034.hadoop.hdfs.B" +
      "aseHeaderProto\022D\n\024blockChecksumOptions\030\002" +
      " \001(\0132&.hadoop.hdfs.BlockChecksumOptionsP" +
      "roto\"\335\002\n\031OpBlockGroupChecksumProto\022,\n\006he" +
      "ader\030\001 \002(\0132\034.hadoop.hdfs.BaseHeaderProto" +
      "\0222\n\tdatanodes\030\002 \002(\0132\037.hadoop.hdfs.Datano" +
      "deInfosProto\022.\n\013blockTokens\030\003 \003(\0132\031.hado" +
      "op.common.TokenProto\0227\n\010ecPolicy\030\004 \002(\0132%" +
      ".hadoop.hdfs.ErasureCodingPolicyProto\022\024\n" +
      "\014blockIndices\030\005 \003(\r\022\031\n\021requestedNumBytes" +
      "\030\006 \002(\004\022D\n\024blockChecksumOptions\030\007 \001(\0132&.h" +
      "adoop.hdfs.BlockChecksumOptionsProto\"0\n\026" +
      "ShortCircuitShmIdProto\022\n\n\002hi\030\001 \002(\003\022\n\n\002lo" +
      "\030\002 \002(\003\"_\n\030ShortCircuitShmSlotProto\0222\n\005sh" +
      "mId\030\001 \002(\0132#.hadoop.hdfs.ShortCircuitShmI" +
      "dProto\022\017\n\007slotIdx\030\002 \002(\005\"\307\001\n OpRequestSho" +
      "rtCircuitAccessProto\022,\n\006header\030\001 \002(\0132\034.h" +
      "adoop.hdfs.BaseHeaderProto\022\022\n\nmaxVersion" +
      "\030\002 \002(\r\0225\n\006slotId\030\003 \001(\0132%.hadoop.hdfs.Sho" +
      "rtCircuitShmSlotProto\022*\n\033supportsReceipt" +
      "Verification\030\004 \001(\010:\005false\"\232\001\n%ReleaseSho" +
      "rtCircuitAccessRequestProto\0225\n\006slotId\030\001 " +
      "\002(\0132%.hadoop.hdfs.ShortCircuitShmSlotPro" +
      "to\022:\n\ttraceInfo\030\002 \001(\0132\'.hadoop.hdfs.Data" +
      "TransferTraceInfoProto\"\\\n&ReleaseShortCi" +
      "rcuitAccessResponseProto\022#\n\006status\030\001 \002(\016" +
      "2\023.hadoop.hdfs.Status\022\r\n\005error\030\002 \001(\t\"m\n\033" +
      "ShortCircuitShmRequestProto\022\022\n\nclientNam" +
      "e\030\001 \002(\t\022:\n\ttraceInfo\030\002 \001(\0132\'.hadoop.hdfs" +
      ".DataTransferTraceInfoProto\"\203\001\n\034ShortCir" +
      "cuitShmResponseProto\022#\n\006status\030\001 \002(\0162\023.h" +
      "adoop.hdfs.Status\022\r\n\005error\030\002 \001(\t\022/\n\002id\030\003" +
      " \001(\0132#.hadoop.hdfs.ShortCircuitShmIdProt" +
      "o\"\177\n\021PacketHeaderProto\022\025\n\roffsetInBlock\030" +
      "\001 \002(\020\022\r\n\005seqno\030\002 \002(\020\022\031\n\021lastPacketInBloc" +
      "k\030\003 \002(\010\022\017\n\007dataLen\030\004 \002(\017\022\030\n\tsyncBlock\030\005 " +
      "\001(\010:\005false\"z\n\020PipelineAckProto\022\r\n\005seqno\030" +
      "\001 \002(\022\022\"\n\005reply\030\002 \003(\0162\023.hadoop.hdfs.Statu" +
      "s\022!\n\026downstreamAckTimeNanos\030\003 \001(\004:\0010\022\020\n\004" +
      "flag\030\004 \003(\rB\002\020\001\"\\\n\027ReadOpChecksumInfoProt" +
      "o\022,\n\010checksum\030\001 \002(\0132\032.hadoop.hdfs.Checks" +
      "umProto\022\023\n\013chunkOffset\030\002 \002(\004\"\214\002\n\024BlockOp" +
      "ResponseProto\022#\n\006status\030\001 \002(\0162\023.hadoop.h" +
      "dfs.Status\022\024\n\014firstBadLink\030\002 \001(\t\022C\n\020chec" +
      "ksumResponse\030\003 \001(\0132).hadoop.hdfs.OpBlock" +
      "ChecksumResponseProto\022@\n\022readOpChecksumI" +
      "nfo\030\004 \001(\0132$.hadoop.hdfs.ReadOpChecksumIn" +
      "foProto\022\017\n\007message\030\005 \001(\t\022!\n\031shortCircuit" +
      "AccessVersion\030\006 \001(\r\"<\n\025ClientReadStatusP" +
      "roto\022#\n\006status\030\001 \002(\0162\023.hadoop.hdfs.Statu" +
      "s\"9\n\022DNTransferAckProto\022#\n\006status\030\001 \002(\0162" +
      "\023.hadoop.hdfs.Status\"\326\001\n\034OpBlockChecksum" +
      "ResponseProto\022\023\n\013bytesPerCrc\030\001 \002(\r\022\023\n\013cr" +
      "cPerBlock\030\002 \002(\004\022\025\n\rblockChecksum\030\003 \002(\014\022/" +
      "\n\007crcType\030\004 \001(\0162\036.hadoop.hdfs.ChecksumTy" +
      "peProto\022D\n\024blockChecksumOptions\030\005 \001(\0132&." +
      "hadoop.hdfs.BlockChecksumOptionsProto\"!\n" +
      "\rOpCustomProto\022\020\n\010customId\030\001 \002(\t*\214\002\n\006Sta" +
      "tus\022\013\n\007SUCCESS\020\000\022\t\n\005ERROR\020\001\022\022\n\016ERROR_CHE" +
      "CKSUM\020\002\022\021\n\rERROR_INVALID\020\003\022\020\n\014ERROR_EXIS" +
      "TS\020\004\022\026\n\022ERROR_ACCESS_TOKEN\020\005\022\017\n\013CHECKSUM" +
      "_OK\020\006\022\025\n\021ERROR_UNSUPPORTED\020\007\022\017\n\013OOB_REST" +
      "ART\020\010\022\021\n\rOOB_RESERVED1\020\t\022\021\n\rOOB_RESERVED" +
      "2\020\n\022\021\n\rOOB_RESERVED3\020\013\022\017\n\013IN_PROGRESS\020\014\022" +
      "\026\n\022ERROR_BLOCK_PINNED\020\r*[\n\026ShortCircuitF" +
      "dResponse\022#\n\037DO_NOT_USE_RECEIPT_VERIFICA" +
      "TION\020\000\022\034\n\030USE_RECEIPT_VERIFICATION\020\001B>\n%" +
      "org.apache.hadoop.hdfs.protocol.protoB\022D" +
      "ataTransferProtos\240\001\001"
    };
    descriptor = org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor
      .internalBuildGeneratedFileFrom(descriptorData,
        new org.apache.hadoop.thirdparty.protobuf.Descriptors.FileDescriptor[] {
          org.apache.hadoop.security.proto.SecurityProtos.getDescriptor(),
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
        });
    internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_descriptor =
      getDescriptor().getMessageTypes().get(0);
    internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_DataTransferEncryptorMessageProto_descriptor,
        new java.lang.String[] { "Status", "Payload", "Message", "CipherOption", "HandshakeSecret", "AccessTokenError", });
    internal_static_hadoop_hdfs_HandshakeSecretProto_descriptor =
      getDescriptor().getMessageTypes().get(1);
    internal_static_hadoop_hdfs_HandshakeSecretProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_HandshakeSecretProto_descriptor,
        new java.lang.String[] { "Secret", "Bpid", });
    internal_static_hadoop_hdfs_BaseHeaderProto_descriptor =
      getDescriptor().getMessageTypes().get(2);
    internal_static_hadoop_hdfs_BaseHeaderProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_BaseHeaderProto_descriptor,
        new java.lang.String[] { "Block", "Token", "TraceInfo", });
    internal_static_hadoop_hdfs_DataTransferTraceInfoProto_descriptor =
      getDescriptor().getMessageTypes().get(3);
    internal_static_hadoop_hdfs_DataTransferTraceInfoProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_DataTransferTraceInfoProto_descriptor,
        new java.lang.String[] { "TraceId", "ParentId", "SpanContext", });
    internal_static_hadoop_hdfs_ClientOperationHeaderProto_descriptor =
      getDescriptor().getMessageTypes().get(4);
    internal_static_hadoop_hdfs_ClientOperationHeaderProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_ClientOperationHeaderProto_descriptor,
        new java.lang.String[] { "BaseHeader", "ClientName", });
    internal_static_hadoop_hdfs_CachingStrategyProto_descriptor =
      getDescriptor().getMessageTypes().get(5);
    internal_static_hadoop_hdfs_CachingStrategyProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_CachingStrategyProto_descriptor,
        new java.lang.String[] { "DropBehind", "Readahead", });
    internal_static_hadoop_hdfs_OpReadBlockProto_descriptor =
      getDescriptor().getMessageTypes().get(6);
    internal_static_hadoop_hdfs_OpReadBlockProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_OpReadBlockProto_descriptor,
        new java.lang.String[] { "Header", "Offset", "Len", "SendChecksums", "CachingStrategy", });
    internal_static_hadoop_hdfs_ChecksumProto_descriptor =
      getDescriptor().getMessageTypes().get(7);
    internal_static_hadoop_hdfs_ChecksumProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_ChecksumProto_descriptor,
        new java.lang.String[] { "Type", "BytesPerChecksum", });
    internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor =
      getDescriptor().getMessageTypes().get(8);
    internal_static_hadoop_hdfs_OpWriteBlockProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_OpWriteBlockProto_descriptor,
        new java.lang.String[] { "Header", "Targets", "Source", "Stage", "PipelineSize", "MinBytesRcvd", "MaxBytesRcvd", "LatestGenerationStamp", "RequestedChecksum", "CachingStrategy", "StorageType", "TargetStorageTypes", "AllowLazyPersist", "Pinning", "TargetPinnings", "StorageId", "TargetStorageIds", });
    internal_static_hadoop_hdfs_OpTransferBlockProto_descriptor =
      getDescriptor().getMessageTypes().get(9);
    internal_static_hadoop_hdfs_OpTransferBlockProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_OpTransferBlockProto_descriptor,
        new java.lang.String[] { "Header", "Targets", "TargetStorageTypes", "TargetStorageIds", });
    internal_static_hadoop_hdfs_OpReplaceBlockProto_descriptor =
      getDescriptor().getMessageTypes().get(10);
    internal_static_hadoop_hdfs_OpReplaceBlockProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_OpReplaceBlockProto_descriptor,
        new java.lang.String[] { "Header", "DelHint", "Source", "StorageType", "StorageId", });
    internal_static_hadoop_hdfs_OpCopyBlockProto_descriptor =
      getDescriptor().getMessageTypes().get(11);
    internal_static_hadoop_hdfs_OpCopyBlockProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_OpCopyBlockProto_descriptor,
        new java.lang.String[] { "Header", });
    internal_static_hadoop_hdfs_OpBlockChecksumProto_descriptor =
      getDescriptor().getMessageTypes().get(12);
    internal_static_hadoop_hdfs_OpBlockChecksumProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_OpBlockChecksumProto_descriptor,
        new java.lang.String[] { "Header", "BlockChecksumOptions", });
    internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_descriptor =
      getDescriptor().getMessageTypes().get(13);
    internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_OpBlockGroupChecksumProto_descriptor,
        new java.lang.String[] { "Header", "Datanodes", "BlockTokens", "EcPolicy", "BlockIndices", "RequestedNumBytes", "BlockChecksumOptions", });
    internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor =
      getDescriptor().getMessageTypes().get(14);
    internal_static_hadoop_hdfs_ShortCircuitShmIdProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_ShortCircuitShmIdProto_descriptor,
        new java.lang.String[] { "Hi", "Lo", });
    internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor =
      getDescriptor().getMessageTypes().get(15);
    internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_ShortCircuitShmSlotProto_descriptor,
        new java.lang.String[] { "ShmId", "SlotIdx", });
    internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor =
      getDescriptor().getMessageTypes().get(16);
    internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_OpRequestShortCircuitAccessProto_descriptor,
        new java.lang.String[] { "Header", "MaxVersion", "SlotId", "SupportsReceiptVerification", });
    internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_descriptor =
      getDescriptor().getMessageTypes().get(17);
    internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_ReleaseShortCircuitAccessRequestProto_descriptor,
        new java.lang.String[] { "SlotId", "TraceInfo", });
    internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(18);
    internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_ReleaseShortCircuitAccessResponseProto_descriptor,
        new java.lang.String[] { "Status", "Error", });
    internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_descriptor =
      getDescriptor().getMessageTypes().get(19);
    internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_ShortCircuitShmRequestProto_descriptor,
        new java.lang.String[] { "ClientName", "TraceInfo", });
    internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(20);
    internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_ShortCircuitShmResponseProto_descriptor,
        new java.lang.String[] { "Status", "Error", "Id", });
    internal_static_hadoop_hdfs_PacketHeaderProto_descriptor =
      getDescriptor().getMessageTypes().get(21);
    internal_static_hadoop_hdfs_PacketHeaderProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_PacketHeaderProto_descriptor,
        new java.lang.String[] { "OffsetInBlock", "Seqno", "LastPacketInBlock", "DataLen", "SyncBlock", });
    internal_static_hadoop_hdfs_PipelineAckProto_descriptor =
      getDescriptor().getMessageTypes().get(22);
    internal_static_hadoop_hdfs_PipelineAckProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_PipelineAckProto_descriptor,
        new java.lang.String[] { "Seqno", "Reply", "DownstreamAckTimeNanos", "Flag", });
    internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor =
      getDescriptor().getMessageTypes().get(23);
    internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_ReadOpChecksumInfoProto_descriptor,
        new java.lang.String[] { "Checksum", "ChunkOffset", });
    internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(24);
    internal_static_hadoop_hdfs_BlockOpResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_BlockOpResponseProto_descriptor,
        new java.lang.String[] { "Status", "FirstBadLink", "ChecksumResponse", "ReadOpChecksumInfo", "Message", "ShortCircuitAccessVersion", });
    internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor =
      getDescriptor().getMessageTypes().get(25);
    internal_static_hadoop_hdfs_ClientReadStatusProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_ClientReadStatusProto_descriptor,
        new java.lang.String[] { "Status", });
    internal_static_hadoop_hdfs_DNTransferAckProto_descriptor =
      getDescriptor().getMessageTypes().get(26);
    internal_static_hadoop_hdfs_DNTransferAckProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_DNTransferAckProto_descriptor,
        new java.lang.String[] { "Status", });
    internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor =
      getDescriptor().getMessageTypes().get(27);
    internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_OpBlockChecksumResponseProto_descriptor,
        new java.lang.String[] { "BytesPerCrc", "CrcPerBlock", "BlockChecksum", "CrcType", "BlockChecksumOptions", });
    internal_static_hadoop_hdfs_OpCustomProto_descriptor =
      getDescriptor().getMessageTypes().get(28);
    internal_static_hadoop_hdfs_OpCustomProto_fieldAccessorTable = new
      org.apache.hadoop.thirdparty.protobuf.GeneratedMessageV3.FieldAccessorTable(
        internal_static_hadoop_hdfs_OpCustomProto_descriptor,
        new java.lang.String[] { "CustomId", });
    org.apache.hadoop.security.proto.SecurityProtos.getDescriptor();
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor();
  }

  // @@protoc_insertion_point(outer_class_scope)
}